From d5c67df8fa9cd50bbc88a329402a3ad04d6867cb Mon Sep 17 00:00:00 2001
From: Marco Braga
Date: Mon, 25 Aug 2025 19:09:08 -0300
Subject: [PATCH 1/3] feat/aws/svc-nlb: Update vendor for kube 1.34 and CCM

Update the kubernetes/cloud-provider-aws library to pick up the latest
support for Security Groups on Service type LoadBalancer NLBs.

Also update the OpenShift clients to support kube 1.34.
---
 .../checkcompilerdirectives.go | 11 +- .../checknoglobals/check_no_globals.go | 14 - vendor/github.com/4meepo/tagalign/.gitignore | 1 + .../4meepo/tagalign/.goreleaser.yml | 4 +- vendor/github.com/4meepo/tagalign/options.go | 7 - vendor/github.com/4meepo/tagalign/tagalign.go | 187 +- .../Antonboom/nilnil/pkg/analyzer/analyzer.go | 2 +- .../fatcontext/pkg/analyzer/analyzer.go | 258 +- .../go-exhaustruct/v3/analyzer/analyzer.go | 2 +- .../OpenPeeDeeP/depguard/v2/README.md | 30 +- .../OpenPeeDeeP/depguard/v2/depguard.go | 6 +- .../OpenPeeDeeP/depguard/v2/settings.go | 14 +- .../alecthomas/go-check-sumtype/.golangci.yml | 92 + .../alecthomas/go-check-sumtype/README.md | 6 + .../alecthomas/go-check-sumtype/check.go | 10 +- .../alecthomas/go-check-sumtype/config.go | 3 + .../alecthomas/go-check-sumtype/def.go | 30 +- .../github.com/alingse/nilnesserr/.gitignore | 25 + .../alingse/nilnesserr/.golangci.yaml | 66 + .../go-diff => alingse/nilnesserr}/LICENSE | 5 +- .../github.com/alingse/nilnesserr/README.md | 74 + .../internal/typeparams/coretype.go | 122 + .../internal/typeparams/normalize.go | 200 + .../internal/typeparams/termlist.go | 163 + .../internal/typeparams/typeterm.go | 166 + .../github.com/alingse/nilnesserr/linter.go | 48 + .../github.com/alingse/nilnesserr/nilerr.go | 83 + .../github.com/alingse/nilnesserr/nilness.go | 374 + .../aws/aws-sdk-go-v2/aws/config.go | 12 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 2 +- .../internal/configsources/CHANGELOG.md | 34 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.go | 139 +- .../endpoints/awsrulesfn/partitions.json | 69 +- .../internal/endpoints/v2/CHANGELOG.md | 34 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/ec2/CHANGELOG.md | 160 + .../aws-sdk-go-v2/service/ec2/api_client.go | 96 +- .../ec2/api_op_AcceptAddressTransfer.go | 30 + ...ceptCapacityReservationBillingOwnership.go | 30 + ...op_AcceptReservedInstancesExchangeQuote.go | 30 + ...ansitGatewayMulticastDomainAssociations.go | 30 + ...p_AcceptTransitGatewayPeeringAttachment.go | 30 + ...pi_op_AcceptTransitGatewayVpcAttachment.go | 30 + .../api_op_AcceptVpcEndpointConnections.go | 30 + .../ec2/api_op_AcceptVpcPeeringConnection.go | 30 + .../service/ec2/api_op_AdvertiseByoipCidr.go | 32 +- .../service/ec2/api_op_AllocateAddress.go | 30 + .../service/ec2/api_op_AllocateHosts.go | 30 + .../ec2/api_op_AllocateIpamPoolCidr.go | 30 + ...ySecurityGroupsToClientVpnTargetNetwork.go | 30 + .../service/ec2/api_op_AssignIpv6Addresses.go | 30 + .../ec2/api_op_AssignPrivateIpAddresses.go | 30 + .../api_op_AssignPrivateNatGatewayAddress.go | 30 + .../service/ec2/api_op_AssociateAddress.go | 30 + ...ssociateCapacityReservationBillingOwner.go | 30 + .../api_op_AssociateClientVpnTargetNetwork.go | 30 + .../ec2/api_op_AssociateDhcpOptions.go | 30 + ...i_op_AssociateEnclaveCertificateIamRole.go | 30 + .../ec2/api_op_AssociateIamInstanceProfile.go | 30 + .../api_op_AssociateInstanceEventWindow.go | 30 + .../service/ec2/api_op_AssociateIpamByoasn.go | 30 + .../api_op_AssociateIpamResourceDiscovery.go
| 30 + .../ec2/api_op_AssociateNatGatewayAddress.go | 30 + .../ec2/api_op_AssociateRouteServer.go | 30 + .../service/ec2/api_op_AssociateRouteTable.go | 34 + .../ec2/api_op_AssociateSecurityGroupVpc.go | 32 +- .../ec2/api_op_AssociateSubnetCidrBlock.go | 30 + ..._AssociateTransitGatewayMulticastDomain.go | 30 + ...i_op_AssociateTransitGatewayPolicyTable.go | 30 + ...pi_op_AssociateTransitGatewayRouteTable.go | 30 + .../ec2/api_op_AssociateTrunkInterface.go | 30 + .../ec2/api_op_AssociateVpcCidrBlock.go | 30 + .../ec2/api_op_AttachClassicLinkVpc.go | 30 + .../ec2/api_op_AttachInternetGateway.go | 30 + .../ec2/api_op_AttachNetworkInterface.go | 30 + ...pi_op_AttachVerifiedAccessTrustProvider.go | 30 + .../service/ec2/api_op_AttachVolume.go | 60 +- .../service/ec2/api_op_AttachVpnGateway.go | 30 + .../ec2/api_op_AuthorizeClientVpnIngress.go | 30 + .../api_op_AuthorizeSecurityGroupEgress.go | 30 + .../api_op_AuthorizeSecurityGroupIngress.go | 30 + .../service/ec2/api_op_BundleInstance.go | 30 + .../service/ec2/api_op_CancelBundleTask.go | 30 + .../ec2/api_op_CancelCapacityReservation.go | 30 + .../api_op_CancelCapacityReservationFleets.go | 30 + .../ec2/api_op_CancelConversionTask.go | 30 + .../api_op_CancelDeclarativePoliciesReport.go | 30 + .../service/ec2/api_op_CancelExportTask.go | 30 + .../ec2/api_op_CancelImageLaunchPermission.go | 30 + .../service/ec2/api_op_CancelImportTask.go | 30 + .../api_op_CancelReservedInstancesListing.go | 30 + .../ec2/api_op_CancelSpotFleetRequests.go | 30 + .../ec2/api_op_CancelSpotInstanceRequests.go | 30 + .../ec2/api_op_ConfirmProductInstance.go | 30 + .../service/ec2/api_op_CopyFpgaImage.go | 30 + .../service/ec2/api_op_CopyImage.go | 164 +- .../service/ec2/api_op_CopySnapshot.go | 65 +- .../ec2/api_op_CreateCapacityReservation.go | 38 +- ...op_CreateCapacityReservationBySplitting.go | 30 + .../api_op_CreateCapacityReservationFleet.go | 30 + .../ec2/api_op_CreateCarrierGateway.go | 30 + .../ec2/api_op_CreateClientVpnEndpoint.go | 60 +- .../ec2/api_op_CreateClientVpnRoute.go | 30 + .../service/ec2/api_op_CreateCoipCidr.go | 30 + .../service/ec2/api_op_CreateCoipPool.go | 30 + .../ec2/api_op_CreateCustomerGateway.go | 34 +- .../service/ec2/api_op_CreateDefaultSubnet.go | 40 +- .../service/ec2/api_op_CreateDefaultVpc.go | 30 + ...op_CreateDelegateMacVolumeOwnershipTask.go | 269 + .../service/ec2/api_op_CreateDhcpOptions.go | 30 + .../api_op_CreateEgressOnlyInternetGateway.go | 30 + .../service/ec2/api_op_CreateFleet.go | 30 + .../service/ec2/api_op_CreateFlowLogs.go | 30 + .../service/ec2/api_op_CreateFpgaImage.go | 30 + .../service/ec2/api_op_CreateImage.go | 60 +- .../ec2/api_op_CreateImageUsageReport.go | 260 + .../api_op_CreateInstanceConnectEndpoint.go | 54 +- .../ec2/api_op_CreateInstanceEventWindow.go | 30 + .../ec2/api_op_CreateInstanceExportTask.go | 30 + .../ec2/api_op_CreateInternetGateway.go | 30 + .../service/ec2/api_op_CreateIpam.go | 30 + ...teIpamExternalResourceVerificationToken.go | 30 + .../service/ec2/api_op_CreateIpamPool.go | 30 + .../ec2/api_op_CreateIpamResourceDiscovery.go | 30 + .../service/ec2/api_op_CreateIpamScope.go | 30 + .../service/ec2/api_op_CreateKeyPair.go | 30 + .../ec2/api_op_CreateLaunchTemplate.go | 30 + .../ec2/api_op_CreateLaunchTemplateVersion.go | 30 + .../ec2/api_op_CreateLocalGatewayRoute.go | 30 + .../api_op_CreateLocalGatewayRouteTable.go | 30 + ...teTableVirtualInterfaceGroupAssociation.go | 30 + ...ateLocalGatewayRouteTableVpcAssociation.go | 30 + ...i_op_CreateLocalGatewayVirtualInterface.go | 30 + 
...CreateLocalGatewayVirtualInterfaceGroup.go | 30 + ...stemIntegrityProtectionModificationTask.go | 312 + .../ec2/api_op_CreateManagedPrefixList.go | 30 + .../service/ec2/api_op_CreateNatGateway.go | 30 + .../service/ec2/api_op_CreateNetworkAcl.go | 30 + .../ec2/api_op_CreateNetworkAclEntry.go | 30 + ...api_op_CreateNetworkInsightsAccessScope.go | 30 + .../ec2/api_op_CreateNetworkInsightsPath.go | 30 + .../ec2/api_op_CreateNetworkInterface.go | 30 + ...api_op_CreateNetworkInterfacePermission.go | 30 + .../ec2/api_op_CreatePlacementGroup.go | 30 + .../ec2/api_op_CreatePublicIpv4Pool.go | 30 + .../ec2/api_op_CreateReplaceRootVolumeTask.go | 30 + .../api_op_CreateReservedInstancesListing.go | 30 + .../ec2/api_op_CreateRestoreImageTask.go | 38 +- .../service/ec2/api_op_CreateRoute.go | 33 + .../service/ec2/api_op_CreateRouteServer.go | 30 + .../ec2/api_op_CreateRouteServerEndpoint.go | 30 + .../ec2/api_op_CreateRouteServerPeer.go | 30 + .../service/ec2/api_op_CreateRouteTable.go | 30 + .../service/ec2/api_op_CreateSecurityGroup.go | 30 + .../service/ec2/api_op_CreateSnapshot.go | 33 +- .../service/ec2/api_op_CreateSnapshots.go | 32 +- .../api_op_CreateSpotDatafeedSubscription.go | 30 + .../ec2/api_op_CreateStoreImageTask.go | 38 +- .../service/ec2/api_op_CreateSubnet.go | 30 + .../ec2/api_op_CreateSubnetCidrReservation.go | 30 + .../service/ec2/api_op_CreateTags.go | 30 + .../ec2/api_op_CreateTrafficMirrorFilter.go | 30 + .../api_op_CreateTrafficMirrorFilterRule.go | 30 + .../ec2/api_op_CreateTrafficMirrorSession.go | 30 + .../ec2/api_op_CreateTrafficMirrorTarget.go | 30 + .../ec2/api_op_CreateTransitGateway.go | 30 + .../ec2/api_op_CreateTransitGatewayConnect.go | 30 + .../api_op_CreateTransitGatewayConnectPeer.go | 30 + ..._op_CreateTransitGatewayMulticastDomain.go | 30 + ...p_CreateTransitGatewayPeeringAttachment.go | 30 + .../api_op_CreateTransitGatewayPolicyTable.go | 30 + ...CreateTransitGatewayPrefixListReference.go | 30 + .../ec2/api_op_CreateTransitGatewayRoute.go | 30 + .../api_op_CreateTransitGatewayRouteTable.go | 30 + ...ateTransitGatewayRouteTableAnnouncement.go | 30 + ...pi_op_CreateTransitGatewayVpcAttachment.go | 30 + .../api_op_CreateVerifiedAccessEndpoint.go | 30 + .../ec2/api_op_CreateVerifiedAccessGroup.go | 30 + .../api_op_CreateVerifiedAccessInstance.go | 30 + ...pi_op_CreateVerifiedAccessTrustProvider.go | 30 + .../service/ec2/api_op_CreateVolume.go | 44 +- .../service/ec2/api_op_CreateVpc.go | 30 + ..._op_CreateVpcBlockPublicAccessExclusion.go | 30 + .../service/ec2/api_op_CreateVpcEndpoint.go | 30 + ...CreateVpcEndpointConnectionNotification.go | 30 + ...p_CreateVpcEndpointServiceConfiguration.go | 30 + .../ec2/api_op_CreateVpcPeeringConnection.go | 30 + .../service/ec2/api_op_CreateVpnConnection.go | 35 + .../ec2/api_op_CreateVpnConnectionRoute.go | 30 + .../service/ec2/api_op_CreateVpnGateway.go | 30 + .../ec2/api_op_DeleteCarrierGateway.go | 30 + .../ec2/api_op_DeleteClientVpnEndpoint.go | 30 + .../ec2/api_op_DeleteClientVpnRoute.go | 30 + .../service/ec2/api_op_DeleteCoipCidr.go | 30 + .../service/ec2/api_op_DeleteCoipPool.go | 30 + .../ec2/api_op_DeleteCustomerGateway.go | 30 + .../service/ec2/api_op_DeleteDhcpOptions.go | 30 + .../api_op_DeleteEgressOnlyInternetGateway.go | 30 + .../service/ec2/api_op_DeleteFleets.go | 30 + .../service/ec2/api_op_DeleteFlowLogs.go | 30 + .../service/ec2/api_op_DeleteFpgaImage.go | 30 + .../ec2/api_op_DeleteImageUsageReport.go | 199 + .../api_op_DeleteInstanceConnectEndpoint.go | 30 + .../ec2/api_op_DeleteInstanceEventWindow.go 
| 30 + .../ec2/api_op_DeleteInternetGateway.go | 30 + .../service/ec2/api_op_DeleteIpam.go | 30 + ...teIpamExternalResourceVerificationToken.go | 30 + .../service/ec2/api_op_DeleteIpamPool.go | 30 + .../ec2/api_op_DeleteIpamResourceDiscovery.go | 30 + .../service/ec2/api_op_DeleteIpamScope.go | 30 + .../service/ec2/api_op_DeleteKeyPair.go | 30 + .../ec2/api_op_DeleteLaunchTemplate.go | 30 + .../api_op_DeleteLaunchTemplateVersions.go | 30 + .../ec2/api_op_DeleteLocalGatewayRoute.go | 30 + .../api_op_DeleteLocalGatewayRouteTable.go | 30 + ...teTableVirtualInterfaceGroupAssociation.go | 30 + ...eteLocalGatewayRouteTableVpcAssociation.go | 30 + ...i_op_DeleteLocalGatewayVirtualInterface.go | 30 + ...DeleteLocalGatewayVirtualInterfaceGroup.go | 30 + .../ec2/api_op_DeleteManagedPrefixList.go | 30 + .../service/ec2/api_op_DeleteNatGateway.go | 30 + .../service/ec2/api_op_DeleteNetworkAcl.go | 30 + .../ec2/api_op_DeleteNetworkAclEntry.go | 30 + ...api_op_DeleteNetworkInsightsAccessScope.go | 30 + ...eleteNetworkInsightsAccessScopeAnalysis.go | 30 + .../api_op_DeleteNetworkInsightsAnalysis.go | 30 + .../ec2/api_op_DeleteNetworkInsightsPath.go | 30 + .../ec2/api_op_DeleteNetworkInterface.go | 30 + ...api_op_DeleteNetworkInterfacePermission.go | 30 + .../ec2/api_op_DeletePlacementGroup.go | 30 + .../ec2/api_op_DeletePublicIpv4Pool.go | 30 + .../api_op_DeleteQueuedReservedInstances.go | 30 + .../service/ec2/api_op_DeleteRoute.go | 30 + .../service/ec2/api_op_DeleteRouteServer.go | 30 + .../ec2/api_op_DeleteRouteServerEndpoint.go | 30 + .../ec2/api_op_DeleteRouteServerPeer.go | 30 + .../service/ec2/api_op_DeleteRouteTable.go | 30 + .../service/ec2/api_op_DeleteSecurityGroup.go | 30 + .../service/ec2/api_op_DeleteSnapshot.go | 30 + .../api_op_DeleteSpotDatafeedSubscription.go | 30 + .../service/ec2/api_op_DeleteSubnet.go | 30 + .../ec2/api_op_DeleteSubnetCidrReservation.go | 30 + .../service/ec2/api_op_DeleteTags.go | 30 + .../ec2/api_op_DeleteTrafficMirrorFilter.go | 30 + .../api_op_DeleteTrafficMirrorFilterRule.go | 30 + .../ec2/api_op_DeleteTrafficMirrorSession.go | 30 + .../ec2/api_op_DeleteTrafficMirrorTarget.go | 30 + .../ec2/api_op_DeleteTransitGateway.go | 30 + .../ec2/api_op_DeleteTransitGatewayConnect.go | 30 + .../api_op_DeleteTransitGatewayConnectPeer.go | 30 + ..._op_DeleteTransitGatewayMulticastDomain.go | 30 + ...p_DeleteTransitGatewayPeeringAttachment.go | 30 + .../api_op_DeleteTransitGatewayPolicyTable.go | 30 + ...DeleteTransitGatewayPrefixListReference.go | 30 + .../ec2/api_op_DeleteTransitGatewayRoute.go | 30 + .../api_op_DeleteTransitGatewayRouteTable.go | 30 + ...eteTransitGatewayRouteTableAnnouncement.go | 30 + ...pi_op_DeleteTransitGatewayVpcAttachment.go | 30 + .../api_op_DeleteVerifiedAccessEndpoint.go | 30 + .../ec2/api_op_DeleteVerifiedAccessGroup.go | 30 + .../api_op_DeleteVerifiedAccessInstance.go | 30 + ...pi_op_DeleteVerifiedAccessTrustProvider.go | 30 + .../service/ec2/api_op_DeleteVolume.go | 30 + .../service/ec2/api_op_DeleteVpc.go | 30 + ..._op_DeleteVpcBlockPublicAccessExclusion.go | 30 + ...eleteVpcEndpointConnectionNotifications.go | 30 + ..._DeleteVpcEndpointServiceConfigurations.go | 30 + .../service/ec2/api_op_DeleteVpcEndpoints.go | 30 + .../ec2/api_op_DeleteVpcPeeringConnection.go | 30 + .../service/ec2/api_op_DeleteVpnConnection.go | 30 + .../ec2/api_op_DeleteVpnConnectionRoute.go | 30 + .../service/ec2/api_op_DeleteVpnGateway.go | 30 + .../ec2/api_op_DeprovisionByoipCidr.go | 34 +- .../ec2/api_op_DeprovisionIpamByoasn.go | 30 + 
.../ec2/api_op_DeprovisionIpamPoolCidr.go | 30 + .../api_op_DeprovisionPublicIpv4PoolCidr.go | 35 +- .../service/ec2/api_op_DeregisterImage.go | 62 +- ...sterInstanceEventNotificationAttributes.go | 30 + ...sterTransitGatewayMulticastGroupMembers.go | 30 + ...sterTransitGatewayMulticastGroupSources.go | 30 + .../ec2/api_op_DescribeAccountAttributes.go | 30 + .../ec2/api_op_DescribeAddressTransfers.go | 30 + .../service/ec2/api_op_DescribeAddresses.go | 30 + .../ec2/api_op_DescribeAddressesAttribute.go | 30 + .../ec2/api_op_DescribeAggregateIdFormat.go | 30 + .../ec2/api_op_DescribeAvailabilityZones.go | 30 + ...wsNetworkPerformanceMetricSubscriptions.go | 30 + .../service/ec2/api_op_DescribeBundleTasks.go | 30 + .../service/ec2/api_op_DescribeByoipCidrs.go | 36 +- ...p_DescribeCapacityBlockExtensionHistory.go | 30 + ...DescribeCapacityBlockExtensionOfferings.go | 30 + .../api_op_DescribeCapacityBlockOfferings.go | 40 +- .../ec2/api_op_DescribeCapacityBlockStatus.go | 312 + .../ec2/api_op_DescribeCapacityBlocks.go | 326 + ...cribeCapacityReservationBillingRequests.go | 30 + ...pi_op_DescribeCapacityReservationFleets.go | 30 + .../api_op_DescribeCapacityReservations.go | 30 + .../ec2/api_op_DescribeCarrierGateways.go | 30 + .../api_op_DescribeClassicLinkInstances.go | 30 + ..._op_DescribeClientVpnAuthorizationRules.go | 30 + .../api_op_DescribeClientVpnConnections.go | 30 + .../ec2/api_op_DescribeClientVpnEndpoints.go | 30 + .../ec2/api_op_DescribeClientVpnRoutes.go | 30 + .../api_op_DescribeClientVpnTargetNetworks.go | 30 + .../service/ec2/api_op_DescribeCoipPools.go | 30 + .../ec2/api_op_DescribeConversionTasks.go | 30 + .../ec2/api_op_DescribeCustomerGateways.go | 30 + ...i_op_DescribeDeclarativePoliciesReports.go | 30 + .../service/ec2/api_op_DescribeDhcpOptions.go | 30 + ...i_op_DescribeEgressOnlyInternetGateways.go | 30 + .../service/ec2/api_op_DescribeElasticGpus.go | 30 + .../ec2/api_op_DescribeExportImageTasks.go | 30 + .../service/ec2/api_op_DescribeExportTasks.go | 30 + .../ec2/api_op_DescribeFastLaunchImages.go | 30 + .../api_op_DescribeFastSnapshotRestores.go | 30 + .../ec2/api_op_DescribeFleetHistory.go | 30 + .../ec2/api_op_DescribeFleetInstances.go | 30 + .../service/ec2/api_op_DescribeFleets.go | 30 + .../service/ec2/api_op_DescribeFlowLogs.go | 30 + .../ec2/api_op_DescribeFpgaImageAttribute.go | 30 + .../service/ec2/api_op_DescribeFpgaImages.go | 30 + ...api_op_DescribeHostReservationOfferings.go | 30 + .../ec2/api_op_DescribeHostReservations.go | 30 + .../service/ec2/api_op_DescribeHosts.go | 30 + ..._DescribeIamInstanceProfileAssociations.go | 30 + .../service/ec2/api_op_DescribeIdFormat.go | 30 + .../ec2/api_op_DescribeIdentityIdFormat.go | 30 + .../ec2/api_op_DescribeImageAttribute.go | 36 +- .../ec2/api_op_DescribeImageReferences.go | 334 + .../api_op_DescribeImageUsageReportEntries.go | 327 + .../ec2/api_op_DescribeImageUsageReports.go | 541 + .../service/ec2/api_op_DescribeImages.go | 43 +- .../ec2/api_op_DescribeImportImageTasks.go | 30 + .../ec2/api_op_DescribeImportSnapshotTasks.go | 30 + .../ec2/api_op_DescribeInstanceAttribute.go | 30 + ...api_op_DescribeInstanceConnectEndpoints.go | 30 + ...op_DescribeInstanceCreditSpecifications.go | 30 + ...ribeInstanceEventNotificationAttributes.go | 30 + .../api_op_DescribeInstanceEventWindows.go | 30 + .../api_op_DescribeInstanceImageMetadata.go | 30 + .../ec2/api_op_DescribeInstanceStatus.go | 32 + .../ec2/api_op_DescribeInstanceTopology.go | 58 +- .../api_op_DescribeInstanceTypeOfferings.go | 30 + 
.../ec2/api_op_DescribeInstanceTypes.go | 37 +- .../service/ec2/api_op_DescribeInstances.go | 32 + .../ec2/api_op_DescribeInternetGateways.go | 30 + .../service/ec2/api_op_DescribeIpamByoasn.go | 30 + ...eIpamExternalResourceVerificationTokens.go | 30 + .../service/ec2/api_op_DescribeIpamPools.go | 30 + .../api_op_DescribeIpamResourceDiscoveries.go | 30 + ...scribeIpamResourceDiscoveryAssociations.go | 30 + .../service/ec2/api_op_DescribeIpamScopes.go | 30 + .../service/ec2/api_op_DescribeIpams.go | 30 + .../service/ec2/api_op_DescribeIpv6Pools.go | 30 + .../service/ec2/api_op_DescribeKeyPairs.go | 30 + .../api_op_DescribeLaunchTemplateVersions.go | 30 + .../ec2/api_op_DescribeLaunchTemplates.go | 30 + ...eTableVirtualInterfaceGroupAssociations.go | 30 + ...beLocalGatewayRouteTableVpcAssociations.go | 30 + .../api_op_DescribeLocalGatewayRouteTables.go | 30 + ...cribeLocalGatewayVirtualInterfaceGroups.go | 30 + ...p_DescribeLocalGatewayVirtualInterfaces.go | 30 + .../ec2/api_op_DescribeLocalGateways.go | 30 + .../ec2/api_op_DescribeLockedSnapshots.go | 30 + .../service/ec2/api_op_DescribeMacHosts.go | 30 + .../api_op_DescribeMacModificationTasks.go | 323 + .../ec2/api_op_DescribeManagedPrefixLists.go | 32 +- .../ec2/api_op_DescribeMovingAddresses.go | 30 + .../service/ec2/api_op_DescribeNatGateways.go | 30 + .../service/ec2/api_op_DescribeNetworkAcls.go | 30 + ...cribeNetworkInsightsAccessScopeAnalyses.go | 30 + ..._op_DescribeNetworkInsightsAccessScopes.go | 30 + .../api_op_DescribeNetworkInsightsAnalyses.go | 30 + .../api_op_DescribeNetworkInsightsPaths.go | 30 + ...pi_op_DescribeNetworkInterfaceAttribute.go | 30 + ..._op_DescribeNetworkInterfacePermissions.go | 30 + .../ec2/api_op_DescribeNetworkInterfaces.go | 38 +- .../service/ec2/api_op_DescribeOutpostLags.go | 32 + .../ec2/api_op_DescribePlacementGroups.go | 30 + .../service/ec2/api_op_DescribePrefixLists.go | 32 +- .../ec2/api_op_DescribePrincipalIdFormat.go | 30 + .../ec2/api_op_DescribePublicIpv4Pools.go | 30 + .../service/ec2/api_op_DescribeRegions.go | 30 + .../api_op_DescribeReplaceRootVolumeTasks.go | 30 + .../ec2/api_op_DescribeReservedInstances.go | 30 + ...pi_op_DescribeReservedInstancesListings.go | 30 + ..._DescribeReservedInstancesModifications.go | 30 + ...i_op_DescribeReservedInstancesOfferings.go | 30 + .../api_op_DescribeRouteServerEndpoints.go | 30 + .../ec2/api_op_DescribeRouteServerPeers.go | 30 + .../ec2/api_op_DescribeRouteServers.go | 30 + .../service/ec2/api_op_DescribeRouteTables.go | 30 + ...p_DescribeScheduledInstanceAvailability.go | 30 + .../ec2/api_op_DescribeScheduledInstances.go | 30 + .../api_op_DescribeSecurityGroupReferences.go | 30 + .../ec2/api_op_DescribeSecurityGroupRules.go | 30 + ...op_DescribeSecurityGroupVpcAssociations.go | 517 + .../ec2/api_op_DescribeSecurityGroups.go | 30 + ...op_DescribeServiceLinkVirtualInterfaces.go | 30 + .../ec2/api_op_DescribeSnapshotAttribute.go | 30 + .../ec2/api_op_DescribeSnapshotTierStatus.go | 30 + .../service/ec2/api_op_DescribeSnapshots.go | 32 +- ...api_op_DescribeSpotDatafeedSubscription.go | 30 + .../ec2/api_op_DescribeSpotFleetInstances.go | 30 + .../api_op_DescribeSpotFleetRequestHistory.go | 30 + .../ec2/api_op_DescribeSpotFleetRequests.go | 30 + .../api_op_DescribeSpotInstanceRequests.go | 33 + .../ec2/api_op_DescribeSpotPriceHistory.go | 40 + .../ec2/api_op_DescribeStaleSecurityGroups.go | 30 + .../ec2/api_op_DescribeStoreImageTasks.go | 38 +- .../service/ec2/api_op_DescribeSubnets.go | 30 + .../service/ec2/api_op_DescribeTags.go | 30 + 
...api_op_DescribeTrafficMirrorFilterRules.go | 30 + .../api_op_DescribeTrafficMirrorFilters.go | 30 + .../api_op_DescribeTrafficMirrorSessions.go | 30 + .../api_op_DescribeTrafficMirrorTargets.go | 30 + ...pi_op_DescribeTransitGatewayAttachments.go | 30 + ...i_op_DescribeTransitGatewayConnectPeers.go | 30 + .../api_op_DescribeTransitGatewayConnects.go | 30 + ..._DescribeTransitGatewayMulticastDomains.go | 30 + ...escribeTransitGatewayPeeringAttachments.go | 30 + ...i_op_DescribeTransitGatewayPolicyTables.go | 30 + ...beTransitGatewayRouteTableAnnouncements.go | 30 + ...pi_op_DescribeTransitGatewayRouteTables.go | 30 + ...op_DescribeTransitGatewayVpcAttachments.go | 30 + .../ec2/api_op_DescribeTransitGateways.go | 30 + ...i_op_DescribeTrunkInterfaceAssociations.go | 30 + .../api_op_DescribeVerifiedAccessEndpoints.go | 30 + .../api_op_DescribeVerifiedAccessGroups.go | 30 + ...fiedAccessInstanceLoggingConfigurations.go | 30 + .../api_op_DescribeVerifiedAccessInstances.go | 30 + ...op_DescribeVerifiedAccessTrustProviders.go | 30 + .../ec2/api_op_DescribeVolumeAttribute.go | 30 + .../ec2/api_op_DescribeVolumeStatus.go | 35 +- .../service/ec2/api_op_DescribeVolumes.go | 33 + .../api_op_DescribeVolumesModifications.go | 30 + .../ec2/api_op_DescribeVpcAttribute.go | 30 + ..._DescribeVpcBlockPublicAccessExclusions.go | 30 + ..._op_DescribeVpcBlockPublicAccessOptions.go | 30 + .../ec2/api_op_DescribeVpcClassicLink.go | 30 + ...api_op_DescribeVpcClassicLinkDnsSupport.go | 30 + .../api_op_DescribeVpcEndpointAssociations.go | 32 +- ...cribeVpcEndpointConnectionNotifications.go | 30 + .../api_op_DescribeVpcEndpointConnections.go | 30 + ...escribeVpcEndpointServiceConfigurations.go | 30 + ...p_DescribeVpcEndpointServicePermissions.go | 30 + .../ec2/api_op_DescribeVpcEndpointServices.go | 30 + .../ec2/api_op_DescribeVpcEndpoints.go | 30 + .../api_op_DescribeVpcPeeringConnections.go | 30 + .../service/ec2/api_op_DescribeVpcs.go | 30 + .../ec2/api_op_DescribeVpnConnections.go | 30 + .../service/ec2/api_op_DescribeVpnGateways.go | 30 + .../ec2/api_op_DetachClassicLinkVpc.go | 30 + .../ec2/api_op_DetachInternetGateway.go | 30 + .../ec2/api_op_DetachNetworkInterface.go | 30 + ...pi_op_DetachVerifiedAccessTrustProvider.go | 30 + .../service/ec2/api_op_DetachVolume.go | 53 +- .../service/ec2/api_op_DetachVpnGateway.go | 30 + .../ec2/api_op_DisableAddressTransfer.go | 30 + .../api_op_DisableAllowedImagesSettings.go | 30 + ...AwsNetworkPerformanceMetricSubscription.go | 30 + .../api_op_DisableEbsEncryptionByDefault.go | 30 + .../service/ec2/api_op_DisableFastLaunch.go | 30 + .../ec2/api_op_DisableFastSnapshotRestores.go | 30 + .../service/ec2/api_op_DisableImage.go | 30 + .../api_op_DisableImageBlockPublicAccess.go | 32 +- .../ec2/api_op_DisableImageDeprecation.go | 34 +- ...op_DisableImageDeregistrationProtection.go | 34 +- ..._op_DisableIpamOrganizationAdminAccount.go | 30 + .../api_op_DisableRouteServerPropagation.go | 30 + .../ec2/api_op_DisableSerialConsoleAccess.go | 30 + ...api_op_DisableSnapshotBlockPublicAccess.go | 30 + ...ableTransitGatewayRouteTablePropagation.go | 30 + .../ec2/api_op_DisableVgwRoutePropagation.go | 30 + .../ec2/api_op_DisableVpcClassicLink.go | 30 + .../api_op_DisableVpcClassicLinkDnsSupport.go | 30 + .../service/ec2/api_op_DisassociateAddress.go | 39 + ...ssociateCapacityReservationBillingOwner.go | 30 + ...i_op_DisassociateClientVpnTargetNetwork.go | 30 + ...p_DisassociateEnclaveCertificateIamRole.go | 30 + .../api_op_DisassociateIamInstanceProfile.go | 30 + 
.../api_op_DisassociateInstanceEventWindow.go | 30 + .../ec2/api_op_DisassociateIpamByoasn.go | 30 + ...pi_op_DisassociateIpamResourceDiscovery.go | 30 + .../api_op_DisassociateNatGatewayAddress.go | 30 + .../ec2/api_op_DisassociateRouteServer.go | 30 + .../ec2/api_op_DisassociateRouteTable.go | 30 + .../api_op_DisassociateSecurityGroupVpc.go | 30 + .../ec2/api_op_DisassociateSubnetCidrBlock.go | 30 + ...sassociateTransitGatewayMulticastDomain.go | 30 + ...p_DisassociateTransitGatewayPolicyTable.go | 30 + ...op_DisassociateTransitGatewayRouteTable.go | 30 + .../ec2/api_op_DisassociateTrunkInterface.go | 30 + .../ec2/api_op_DisassociateVpcCidrBlock.go | 30 + .../ec2/api_op_EnableAddressTransfer.go | 30 + .../ec2/api_op_EnableAllowedImagesSettings.go | 30 + ...AwsNetworkPerformanceMetricSubscription.go | 30 + .../api_op_EnableEbsEncryptionByDefault.go | 32 +- .../service/ec2/api_op_EnableFastLaunch.go | 30 + .../ec2/api_op_EnableFastSnapshotRestores.go | 33 +- .../service/ec2/api_op_EnableImage.go | 34 +- .../api_op_EnableImageBlockPublicAccess.go | 32 +- .../ec2/api_op_EnableImageDeprecation.go | 30 + ..._op_EnableImageDeregistrationProtection.go | 36 +- ...i_op_EnableIpamOrganizationAdminAccount.go | 30 + ...ReachabilityAnalyzerOrganizationSharing.go | 30 + .../api_op_EnableRouteServerPropagation.go | 30 + .../ec2/api_op_EnableSerialConsoleAccess.go | 30 + .../api_op_EnableSnapshotBlockPublicAccess.go | 30 + ...ableTransitGatewayRouteTablePropagation.go | 30 + .../ec2/api_op_EnableVgwRoutePropagation.go | 30 + .../service/ec2/api_op_EnableVolumeIO.go | 30 + .../ec2/api_op_EnableVpcClassicLink.go | 30 + .../api_op_EnableVpcClassicLinkDnsSupport.go | 30 + ...lientVpnClientCertificateRevocationList.go | 30 + ...i_op_ExportClientVpnClientConfiguration.go | 30 + .../service/ec2/api_op_ExportImage.go | 30 + .../ec2/api_op_ExportTransitGatewayRoutes.go | 30 + ...rifiedAccessInstanceClientConfiguration.go | 30 + .../ec2/api_op_GetActiveVpnTunnelStatus.go | 202 + .../ec2/api_op_GetAllowedImagesSettings.go | 30 + ...GetAssociatedEnclaveCertificateIamRoles.go | 30 + .../ec2/api_op_GetAssociatedIpv6PoolCidrs.go | 30 + .../api_op_GetAwsNetworkPerformanceData.go | 30 + .../ec2/api_op_GetCapacityReservationUsage.go | 30 + .../service/ec2/api_op_GetCoipPoolUsage.go | 30 + .../service/ec2/api_op_GetConsoleOutput.go | 30 + .../ec2/api_op_GetConsoleScreenshot.go | 30 + ..._op_GetDeclarativePoliciesReportSummary.go | 30 + .../api_op_GetDefaultCreditSpecification.go | 30 + .../ec2/api_op_GetEbsDefaultKmsKeyId.go | 33 +- .../ec2/api_op_GetEbsEncryptionByDefault.go | 30 + .../api_op_GetFlowLogsIntegrationTemplate.go | 30 + .../api_op_GetGroupsForCapacityReservation.go | 30 + ...pi_op_GetHostReservationPurchasePreview.go | 30 + .../api_op_GetImageBlockPublicAccessState.go | 32 +- .../ec2/api_op_GetInstanceMetadataDefaults.go | 30 + .../service/ec2/api_op_GetInstanceTpmEkPub.go | 30 + ...etInstanceTypesFromInstanceRequirements.go | 33 + .../service/ec2/api_op_GetInstanceUefiData.go | 30 + .../ec2/api_op_GetIpamAddressHistory.go | 30 + .../ec2/api_op_GetIpamDiscoveredAccounts.go | 30 + ...api_op_GetIpamDiscoveredPublicAddresses.go | 30 + .../api_op_GetIpamDiscoveredResourceCidrs.go | 30 + .../ec2/api_op_GetIpamPoolAllocations.go | 30 + .../service/ec2/api_op_GetIpamPoolCidrs.go | 30 + .../ec2/api_op_GetIpamResourceCidrs.go | 30 + .../ec2/api_op_GetLaunchTemplateData.go | 30 + ...api_op_GetManagedPrefixListAssociations.go | 30 + .../ec2/api_op_GetManagedPrefixListEntries.go | 30 + 
...workInsightsAccessScopeAnalysisFindings.go | 30 + ...op_GetNetworkInsightsAccessScopeContent.go | 30 + .../service/ec2/api_op_GetPasswordData.go | 30 + ...pi_op_GetReservedInstancesExchangeQuote.go | 30 + .../ec2/api_op_GetRouteServerAssociations.go | 30 + .../ec2/api_op_GetRouteServerPropagations.go | 30 + .../api_op_GetRouteServerRoutingDatabase.go | 30 + .../ec2/api_op_GetSecurityGroupsForVpc.go | 30 + .../api_op_GetSerialConsoleAccessStatus.go | 30 + ...pi_op_GetSnapshotBlockPublicAccessState.go | 30 + .../ec2/api_op_GetSpotPlacementScores.go | 30 + .../ec2/api_op_GetSubnetCidrReservations.go | 30 + ...GetTransitGatewayAttachmentPropagations.go | 30 + ...ansitGatewayMulticastDomainAssociations.go | 30 + ...etTransitGatewayPolicyTableAssociations.go | 30 + ..._op_GetTransitGatewayPolicyTableEntries.go | 30 + ...p_GetTransitGatewayPrefixListReferences.go | 30 + ...GetTransitGatewayRouteTableAssociations.go | 30 + ...GetTransitGatewayRouteTablePropagations.go | 30 + .../api_op_GetVerifiedAccessEndpointPolicy.go | 30 + ...api_op_GetVerifiedAccessEndpointTargets.go | 30 + .../api_op_GetVerifiedAccessGroupPolicy.go | 30 + ...tVpnConnectionDeviceSampleConfiguration.go | 35 + .../ec2/api_op_GetVpnConnectionDeviceTypes.go | 30 + .../api_op_GetVpnTunnelReplacementStatus.go | 30 + ...lientVpnClientCertificateRevocationList.go | 30 + .../service/ec2/api_op_ImportImage.go | 30 + .../service/ec2/api_op_ImportInstance.go | 30 + .../service/ec2/api_op_ImportKeyPair.go | 30 + .../service/ec2/api_op_ImportSnapshot.go | 30 + .../service/ec2/api_op_ImportVolume.go | 45 +- .../ec2/api_op_ListImagesInRecycleBin.go | 30 + .../ec2/api_op_ListSnapshotsInRecycleBin.go | 30 + .../service/ec2/api_op_LockSnapshot.go | 30 + .../ec2/api_op_ModifyAddressAttribute.go | 30 + .../ec2/api_op_ModifyAvailabilityZoneGroup.go | 30 + .../ec2/api_op_ModifyCapacityReservation.go | 30 + .../api_op_ModifyCapacityReservationFleet.go | 30 + .../ec2/api_op_ModifyClientVpnEndpoint.go | 30 + ...api_op_ModifyDefaultCreditSpecification.go | 30 + .../ec2/api_op_ModifyEbsDefaultKmsKeyId.go | 34 +- .../service/ec2/api_op_ModifyFleet.go | 30 + .../ec2/api_op_ModifyFpgaImageAttribute.go | 30 + .../service/ec2/api_op_ModifyHosts.go | 30 + .../service/ec2/api_op_ModifyIdFormat.go | 30 + .../ec2/api_op_ModifyIdentityIdFormat.go | 30 + .../ec2/api_op_ModifyImageAttribute.go | 30 + .../ec2/api_op_ModifyInstanceAttribute.go | 32 +- ...fyInstanceCapacityReservationAttributes.go | 30 + .../api_op_ModifyInstanceConnectEndpoint.go | 228 + .../ec2/api_op_ModifyInstanceCpuOptions.go | 30 + ...pi_op_ModifyInstanceCreditSpecification.go | 30 + .../api_op_ModifyInstanceEventStartTime.go | 30 + .../ec2/api_op_ModifyInstanceEventWindow.go | 30 + ...api_op_ModifyInstanceMaintenanceOptions.go | 66 + .../api_op_ModifyInstanceMetadataDefaults.go | 30 + .../api_op_ModifyInstanceMetadataOptions.go | 30 + ...ModifyInstanceNetworkPerformanceOptions.go | 30 + .../ec2/api_op_ModifyInstancePlacement.go | 30 + .../service/ec2/api_op_ModifyIpam.go | 30 + .../service/ec2/api_op_ModifyIpamPool.go | 30 + .../ec2/api_op_ModifyIpamResourceCidr.go | 30 + .../ec2/api_op_ModifyIpamResourceDiscovery.go | 30 + .../service/ec2/api_op_ModifyIpamScope.go | 30 + .../ec2/api_op_ModifyLaunchTemplate.go | 30 + .../ec2/api_op_ModifyLocalGatewayRoute.go | 30 + .../ec2/api_op_ModifyManagedPrefixList.go | 30 + .../api_op_ModifyNetworkInterfaceAttribute.go | 33 + .../ec2/api_op_ModifyPrivateDnsNameOptions.go | 30 + .../api_op_ModifyPublicIpDnsNameOptions.go | 222 + 
.../ec2/api_op_ModifyReservedInstances.go | 30 + .../service/ec2/api_op_ModifyRouteServer.go | 30 + .../ec2/api_op_ModifySecurityGroupRules.go | 30 + .../ec2/api_op_ModifySnapshotAttribute.go | 30 + .../service/ec2/api_op_ModifySnapshotTier.go | 30 + .../ec2/api_op_ModifySpotFleetRequest.go | 30 + .../ec2/api_op_ModifySubnetAttribute.go | 30 + ...odifyTrafficMirrorFilterNetworkServices.go | 30 + .../api_op_ModifyTrafficMirrorFilterRule.go | 30 + .../ec2/api_op_ModifyTrafficMirrorSession.go | 30 + .../ec2/api_op_ModifyTransitGateway.go | 30 + ...ModifyTransitGatewayPrefixListReference.go | 30 + ...pi_op_ModifyTransitGatewayVpcAttachment.go | 30 + .../api_op_ModifyVerifiedAccessEndpoint.go | 30 + ...i_op_ModifyVerifiedAccessEndpointPolicy.go | 30 + .../ec2/api_op_ModifyVerifiedAccessGroup.go | 30 + .../api_op_ModifyVerifiedAccessGroupPolicy.go | 30 + .../api_op_ModifyVerifiedAccessInstance.go | 30 + ...ifiedAccessInstanceLoggingConfiguration.go | 30 + ...pi_op_ModifyVerifiedAccessTrustProvider.go | 30 + .../service/ec2/api_op_ModifyVolume.go | 30 + .../ec2/api_op_ModifyVolumeAttribute.go | 30 + .../service/ec2/api_op_ModifyVpcAttribute.go | 30 + ..._op_ModifyVpcBlockPublicAccessExclusion.go | 30 + ...pi_op_ModifyVpcBlockPublicAccessOptions.go | 30 + .../service/ec2/api_op_ModifyVpcEndpoint.go | 30 + ...ModifyVpcEndpointConnectionNotification.go | 30 + ...p_ModifyVpcEndpointServiceConfiguration.go | 30 + ...fyVpcEndpointServicePayerResponsibility.go | 30 + ..._op_ModifyVpcEndpointServicePermissions.go | 30 + ...pi_op_ModifyVpcPeeringConnectionOptions.go | 30 + .../service/ec2/api_op_ModifyVpcTenancy.go | 30 + .../service/ec2/api_op_ModifyVpnConnection.go | 30 + .../ec2/api_op_ModifyVpnConnectionOptions.go | 30 + .../ec2/api_op_ModifyVpnTunnelCertificate.go | 30 + .../ec2/api_op_ModifyVpnTunnelOptions.go | 35 + .../service/ec2/api_op_MonitorInstances.go | 30 + .../service/ec2/api_op_MoveAddressToVpc.go | 37 +- .../service/ec2/api_op_MoveByoipCidrToIpam.go | 30 + ...api_op_MoveCapacityReservationInstances.go | 30 + .../service/ec2/api_op_ProvisionByoipCidr.go | 38 +- .../service/ec2/api_op_ProvisionIpamByoasn.go | 30 + .../ec2/api_op_ProvisionIpamPoolCidr.go | 30 + .../ec2/api_op_ProvisionPublicIpv4PoolCidr.go | 30 + .../ec2/api_op_PurchaseCapacityBlock.go | 33 + .../api_op_PurchaseCapacityBlockExtension.go | 30 + .../ec2/api_op_PurchaseHostReservation.go | 30 + ...pi_op_PurchaseReservedInstancesOffering.go | 30 + .../ec2/api_op_PurchaseScheduledInstances.go | 30 + .../service/ec2/api_op_RebootInstances.go | 30 + .../service/ec2/api_op_RegisterImage.go | 72 +- ...sterInstanceEventNotificationAttributes.go | 30 + ...sterTransitGatewayMulticastGroupMembers.go | 30 + ...sterTransitGatewayMulticastGroupSources.go | 30 + ...jectCapacityReservationBillingOwnership.go | 30 + ...ansitGatewayMulticastDomainAssociations.go | 30 + ...p_RejectTransitGatewayPeeringAttachment.go | 30 + ...pi_op_RejectTransitGatewayVpcAttachment.go | 30 + .../api_op_RejectVpcEndpointConnections.go | 30 + .../ec2/api_op_RejectVpcPeeringConnection.go | 30 + .../service/ec2/api_op_ReleaseAddress.go | 43 +- .../service/ec2/api_op_ReleaseHosts.go | 30 + .../ec2/api_op_ReleaseIpamPoolAllocation.go | 30 + ...op_ReplaceIamInstanceProfileAssociation.go | 30 + ...aceImageCriteriaInAllowedImagesSettings.go | 30 + .../api_op_ReplaceNetworkAclAssociation.go | 30 + .../ec2/api_op_ReplaceNetworkAclEntry.go | 30 + .../service/ec2/api_op_ReplaceRoute.go | 33 + .../api_op_ReplaceRouteTableAssociation.go | 30 + 
.../ec2/api_op_ReplaceTransitGatewayRoute.go | 30 + .../service/ec2/api_op_ReplaceVpnTunnel.go | 30 + .../ec2/api_op_ReportInstanceStatus.go | 30 + .../service/ec2/api_op_RequestSpotFleet.go | 30 + .../ec2/api_op_RequestSpotInstances.go | 30 + .../ec2/api_op_ResetAddressAttribute.go | 30 + .../ec2/api_op_ResetEbsDefaultKmsKeyId.go | 30 + .../ec2/api_op_ResetFpgaImageAttribute.go | 30 + .../service/ec2/api_op_ResetImageAttribute.go | 30 + .../ec2/api_op_ResetInstanceAttribute.go | 30 + .../api_op_ResetNetworkInterfaceAttribute.go | 30 + .../ec2/api_op_ResetSnapshotAttribute.go | 30 + .../ec2/api_op_RestoreAddressToClassic.go | 30 + .../ec2/api_op_RestoreImageFromRecycleBin.go | 34 +- .../api_op_RestoreManagedPrefixListVersion.go | 30 + .../api_op_RestoreSnapshotFromRecycleBin.go | 30 + .../service/ec2/api_op_RestoreSnapshotTier.go | 30 + .../ec2/api_op_RevokeClientVpnIngress.go | 30 + .../ec2/api_op_RevokeSecurityGroupEgress.go | 30 + .../ec2/api_op_RevokeSecurityGroupIngress.go | 30 + .../service/ec2/api_op_RunInstances.go | 70 +- .../ec2/api_op_RunScheduledInstances.go | 30 + .../ec2/api_op_SearchLocalGatewayRoutes.go | 30 + ..._op_SearchTransitGatewayMulticastGroups.go | 30 + .../ec2/api_op_SearchTransitGatewayRoutes.go | 30 + .../ec2/api_op_SendDiagnosticInterrupt.go | 30 + .../api_op_StartDeclarativePoliciesReport.go | 37 +- .../service/ec2/api_op_StartInstances.go | 30 + ...StartNetworkInsightsAccessScopeAnalysis.go | 30 + .../api_op_StartNetworkInsightsAnalysis.go | 30 + ...pcEndpointServicePrivateDnsVerification.go | 30 + .../service/ec2/api_op_StopInstances.go | 71 +- .../api_op_TerminateClientVpnConnections.go | 30 + .../service/ec2/api_op_TerminateInstances.go | 53 +- .../ec2/api_op_UnassignIpv6Addresses.go | 30 + .../ec2/api_op_UnassignPrivateIpAddresses.go | 30 + ...api_op_UnassignPrivateNatGatewayAddress.go | 30 + .../service/ec2/api_op_UnlockSnapshot.go | 30 + .../service/ec2/api_op_UnmonitorInstances.go | 30 + ...dateSecurityGroupRuleDescriptionsEgress.go | 30 + ...ateSecurityGroupRuleDescriptionsIngress.go | 30 + .../service/ec2/api_op_WithdrawByoipCidr.go | 30 + .../aws/aws-sdk-go-v2/service/ec2/auth.go | 28 +- .../service/ec2/deserializers.go | 16770 ++++++++++------ .../aws-sdk-go-v2/service/ec2/generated.json | 13 + .../service/ec2/go_module_metadata.go | 2 +- .../ec2/internal/endpoints/endpoints.go | 6 + .../aws/aws-sdk-go-v2/service/ec2/options.go | 7 + .../aws-sdk-go-v2/service/ec2/serializers.go | 6790 ++++--- .../aws-sdk-go-v2/service/ec2/types/enums.go | 502 +- .../aws-sdk-go-v2/service/ec2/types/types.go | 890 +- .../aws-sdk-go-v2/service/ec2/validators.go | 355 +- .../internal/accept-encoding/CHANGELOG.md | 12 + .../accept-encoding/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 43 + .../presigned-url/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 18 + vendor/github.com/aws/smithy-go/README.md | 17 +- .../aws/smithy-go/endpoints/endpoint.go | 2 +- .../aws/smithy-go/go_module_metadata.go | 2 +- .../smithy-go/transport/http/interceptor.go | 321 + .../transport/http/interceptor_middleware.go | 325 + .../github.com/bombsimon/wsl/v4/.golangci.yml | 15 +- .../github.com/bombsimon/wsl/v4/analyzer.go | 23 +- vendor/github.com/bombsimon/wsl/v4/wsl.go | 14 +- .../butuzov/ireturn/analyzer/analyzer.go | 24 +- .../ireturn/analyzer/internal/config/allow.go | 2 +- .../ireturn/analyzer/internal/config/new.go | 1 - .../analyzer/internal/config/reject.go | 2 +- .../ireturn/analyzer/internal/types/iface.go | 2 +- 
.../github.com/butuzov/mirror/MIRROR_FUNCS.md | 254 +- vendor/github.com/butuzov/mirror/Makefile | 19 +- vendor/github.com/butuzov/mirror/analyzer.go | 4 +- .../butuzov/mirror/checkers_maphash.go | 87 +- .../mirror/internal/checker/checker.go | 6 +- .../mirror/internal/checker/violation.go | 6 +- vendor/github.com/butuzov/mirror/readme.md | 13 +- .../perfsprint/analyzer/analyzer.go | 369 +- .../perfsprint/analyzer/diagnostic.go | 24 + .../ckaznocha/intrange/.golangci.yml | 17 +- .../github.com/ckaznocha/intrange/intrange.go | 190 +- .../curioswitch/go-reassign/.golangci.yml | 5 +- .../curioswitch/go-reassign/README.md | 7 +- .../go-reassign/internal/analyzer/analyzer.go | 24 +- .../emicklei/go-restful/v3/CHANGES.md | 5 +- .../emicklei/go-restful/v3/README.md | 2 +- .../emicklei/go-restful/v3/jsr311.go | 19 +- .../emicklei/go-restful/v3/route.go | 2 + vendor/github.com/fxamacker/cbor/v2/README.md | 607 +- .../fxamacker/cbor/v2/bytestring.go | 27 + vendor/github.com/fxamacker/cbor/v2/cache.go | 25 +- vendor/github.com/fxamacker/cbor/v2/common.go | 9 + vendor/github.com/fxamacker/cbor/v2/decode.go | 425 +- vendor/github.com/fxamacker/cbor/v2/doc.go | 51 +- vendor/github.com/fxamacker/cbor/v2/encode.go | 442 +- .../fxamacker/cbor/v2/encode_map.go | 10 +- .../fxamacker/cbor/v2/encode_map_go117.go | 60 - .../fxamacker/cbor/v2/omitzero_go124.go | 8 + .../fxamacker/cbor/v2/omitzero_pre_go124.go | 8 + .../fxamacker/cbor/v2/simplevalue.go | 29 + vendor/github.com/fxamacker/cbor/v2/stream.go | 4 +- .../fxamacker/cbor/v2/structfields.go | 18 +- vendor/github.com/fxamacker/cbor/v2/tag.go | 48 +- .../ghostiam/protogetter/.goreleaser.yaml | 3 +- .../ghostiam/protogetter/protogetter.go | 55 +- .../go-critic/checkers/hugeParam_checker.go | 1 + .../go-viper/mapstructure/v2/.editorconfig | 3 + .../go-viper/mapstructure/v2/.golangci.yaml | 46 +- .../go-viper/mapstructure/v2/README.md | 9 +- .../go-viper/mapstructure/v2/decode_hooks.go | 26 +- .../go-viper/mapstructure/v2/errors.go | 74 + .../go-viper/mapstructure/v2/flake.lock | 390 +- .../go-viper/mapstructure/v2/flake.nix | 45 +- .../go-viper/mapstructure/v2/mapstructure.go | 220 +- vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go | 18 +- vendor/github.com/golangci/dupl/.travis.yml | 5 - vendor/github.com/golangci/dupl/README.md | 63 - .../golangci/dupl/{main.go => lib/lib.go} | 66 +- .../github.com/golangci/dupl/printer/html.go | 14 +- .../golangci/dupl/printer/issuer.go | 56 + .../golangci/dupl/printer/plumbing.go | 44 +- .../golangci/dupl/suffixtree/suffixtree.go | 2 +- .../github.com/golangci/dupl/syntax/syntax.go | 20 +- .../github.com/golangci/gofmt/gofmt/gofmt.go | 2 +- .../golangci/gofmt/gofmt/golangci.go | 39 +- .../github.com/golangci/gofmt/gofmt/readme.md | 3 +- .../golangci/gofmt/goimports/goimports.go | 88 - .../golangci/gofmt/goimports/golangci.go | 35 - .../golangci/gofmt/goimports/readme.md | 10 - .../golangci-lint/cmd/golangci-lint/main.go | 15 +- .../golangci-lint/internal/cache/cache.go | 6 +- .../golangci-lint/internal/x}/LICENSE | 4 +- .../internal/x/tools/analysisflags/readme.md | 8 + .../internal/x/tools/analysisflags/url.go | 33 + .../x/tools/analysisinternal/analysis.go | 48 + .../x/tools/analysisinternal/readme.md | 8 + .../internal/x/tools/diff/diff.go | 176 + .../internal/x/tools/diff/lcs/common.go | 179 + .../internal/x/tools/diff/lcs/doc.go | 156 + .../internal/x/tools/diff/lcs/git.sh | 33 + .../internal/x/tools/diff/lcs/labels.go | 55 + .../internal/x/tools/diff/lcs/old.go | 480 + .../internal/x/tools/diff/lcs/sequence.go | 113 + 
.../internal/x/tools/diff/ndiff.go | 99 + .../internal/x/tools/diff/readme.md | 8 + .../internal/x/tools/diff/unified.go | 251 + .../pkg/commands/config_verify.go | 114 +- .../golangci-lint/pkg/commands/flagsets.go | 49 +- .../golangci-lint/pkg/commands/help.go | 131 +- .../golangci-lint/pkg/commands/run.go | 30 +- .../golangci-lint/pkg/config/base_rule.go | 75 + .../golangci-lint/pkg/config/config.go | 111 +- .../golangci-lint/pkg/config/issues.go | 93 +- .../golangci-lint/pkg/config/linters.go | 14 +- .../pkg/config/linters_exclusions.go | 55 + .../pkg/config/linters_settings.go | 124 +- .../golangci-lint/pkg/config/loader.go | 74 +- .../golangci-lint/pkg/config/output.go | 12 +- .../golangci/golangci-lint/pkg/config/run.go | 20 +- .../golangci-lint/pkg/fsutils/basepath.go | 77 + .../golangci-lint/pkg/fsutils/fsutils.go | 14 +- .../golangci-lint/pkg/fsutils/fsutils_unix.go | 9 + .../pkg/fsutils/fsutils_windows.go | 39 + .../golangci-lint/pkg/goanalysis/issue.go | 2 +- .../golangci-lint/pkg/goanalysis/position.go | 50 + .../golangci-lint/pkg/goanalysis/runner.go | 54 +- .../pkg/goanalysis/runner_action.go | 18 +- .../pkg/goanalysis/runner_action_cache.go | 24 +- .../{runner_base.go => runner_checker.go} | 248 +- .../pkg/goanalysis/runner_loadingpackage.go | 27 +- .../golangci-lint/pkg/goanalysis/runners.go | 43 +- .../pkg/goanalysis/runners_cache.go | 4 +- .../pkg/goformatters/analyzer.go | 55 + .../pkg/goformatters/formatters.go | 6 + .../golangci-lint/pkg/goformatters/gci/gci.go | 71 + .../pkg/goformatters/gci/internal/LICENSE | 29 + .../gci/internal/config/config.go | 107 + .../gci/internal/section/parser.go | 51 + .../gci/internal/section/section.go | 7 + .../gci/internal/section/standard.go | 30 + .../gci/internal/section/standard_list.go | 182 + .../pkg/goformatters/gofmt/gofmt.go | 35 + .../pkg/goformatters/gofumpt/gofumpt.go | 46 + .../pkg/goformatters/goimports/goimports.go | 28 + .../pkg/goformatters/internal/commons.go | 6 + .../pkg/goformatters/internal/diff.go | 274 + .../pkg/goformatters/meta_formatter.go | 74 + .../pkg/golinters/asasalint/asasalint.go | 10 +- .../pkg/golinters/bidichk/bidichk.go | 28 +- .../pkg/golinters/bodyclose/bodyclose.go | 2 +- .../pkg/golinters/cyclop/cyclop.go | 2 +- .../pkg/golinters/depguard/depguard.go | 12 +- .../pkg/golinters/dogsled/dogsled.go | 104 +- .../golangci-lint/pkg/golinters/dupl/dupl.go | 8 +- .../pkg/golinters/dupword/dupword.go | 14 +- .../pkg/golinters/errcheck/errcheck.go | 6 +- .../pkg/golinters/errchkjson/errchkjson.go | 16 +- .../pkg/golinters/errorlint/errorlint.go | 24 +- .../exportloopref.go => exptostd/exptostd.go} | 6 +- .../pkg/golinters/fatcontext/fatcontext.go | 15 +- .../pkg/golinters/forbidigo/forbidigo.go | 44 +- .../forcetypeassert/forcetypeassert.go | 2 +- .../pkg/golinters/funlen/funlen.go | 74 +- .../golangci-lint/pkg/golinters/gci/gci.go | 245 +- .../golinters/ginkgolinter/ginkgolinter.go | 24 +- .../gochecknoglobals/gochecknoglobals.go | 9 +- .../gochecknoinits/gochecknoinits.go | 64 +- .../gochecksumtype/gochecksumtype.go | 8 +- .../pkg/golinters/gocritic/gocritic.go | 109 +- .../pkg/golinters/godot/godot.go | 71 +- .../pkg/golinters/godox/godox.go | 71 +- .../pkg/golinters/gofmt/gofmt.go | 92 +- .../pkg/golinters/gofumpt/gofumpt.go | 126 +- .../pkg/golinters/goheader/goheader.go | 107 +- .../pkg/golinters/goimports/goimports.go | 88 +- .../gomoddirectives/gomoddirectives.go | 25 +- .../pkg/golinters/gomodguard/gomodguard.go | 2 +- .../pkg/golinters/gosec/gosec.go | 10 +- 
.../golinters/gosmopolitan/gosmopolitan.go | 18 +- .../pkg/golinters/govet/govet.go | 23 +- .../pkg/golinters/grouper/grouper.go | 6 +- .../pkg/golinters/importas/importas.go | 7 +- .../pkg/golinters/internal/commons.go | 9 + .../pkg/golinters/internal/diff.go | 264 - .../pkg/golinters/internal/util.go | 21 +- .../golangci-lint/pkg/golinters/lll/lll.go | 102 +- .../pkg/golinters/loggercheck/loggercheck.go | 3 + .../pkg/golinters/maintidx/maintidx.go | 12 +- .../pkg/golinters/makezero/makezero.go | 37 +- .../pkg/golinters/mirror/mirror.go | 64 +- .../pkg/golinters/misspell/misspell.go | 126 +- .../pkg/golinters/musttag/musttag.go | 6 +- .../pkg/golinters/nestif/nestif.go | 63 +- .../pkg/golinters/nilnesserr/nilnesserr.go | 23 + .../pkg/golinters/nilnil/nilnil.go | 8 +- .../golinters/nolintlint/internal/issues.go | 41 + .../nolintlint/internal/nolintlint.go | 261 +- .../pkg/golinters/nolintlint/nolintlint.go | 84 +- .../pkg/golinters/perfsprint/perfsprint.go | 8 + .../pkg/golinters/prealloc/prealloc.go | 35 +- .../pkg/golinters/protogetter/protogetter.go | 46 +- .../pkg/golinters/recvcheck/recvcheck.go | 12 +- .../pkg/golinters/revive/revive.go | 84 +- .../pkg/golinters/spancheck/spancheck.go | 8 +- .../pkg/golinters/tagalign/tagalign.go | 44 +- .../pkg/golinters/tagliatelle/tagliatelle.go | 42 +- .../pkg/golinters/testpackage/testpackage.go | 14 +- .../pkg/golinters/thelper/thelper.go | 21 +- .../pkg/golinters/unparam/unparam.go | 34 +- .../golinters/usestdlibvars/usestdlibvars.go | 34 +- .../pkg/golinters/usetesting/usetesting.go | 33 + .../pkg/golinters/whitespace/whitespace.go | 76 +- .../pkg/golinters/wrapcheck/wrapcheck.go | 2 + .../golangci/golangci-lint/pkg/goutil/env.go | 34 +- .../golangci-lint/pkg/lint/context.go | 8 +- .../golangci-lint/pkg/lint/linter/config.go | 8 +- .../golangci-lint/pkg/lint/linter/context.go | 6 +- .../pkg/lint/lintersdb/builder_linter.go | 89 +- .../pkg/lint/lintersdb/builder_plugin_go.go | 17 +- .../pkg/lint/lintersdb/manager.go | 23 +- .../golangci-lint/pkg/lint/package.go | 6 +- .../golangci/golangci-lint/pkg/lint/runner.go | 59 +- .../golangci-lint/pkg/logutils/logutils.go | 78 +- .../golangci-lint/pkg/printers/checkstyle.go | 81 +- .../golangci-lint/pkg/printers/codeclimate.go | 88 +- .../golangci-lint/pkg/printers/html.go | 2 + .../golangci-lint/pkg/printers/json.go | 5 +- .../golangci-lint/pkg/printers/junitxml.go | 78 +- .../golangci-lint/pkg/printers/printer.go | 67 +- .../golangci-lint/pkg/printers/sarif.go | 125 +- .../golangci-lint/pkg/printers/tab.go | 5 +- .../golangci-lint/pkg/printers/teamcity.go | 42 +- .../golangci-lint/pkg/printers/text.go | 9 +- .../golangci-lint/pkg/result/issue.go | 29 +- .../pkg/result/processors/base_rule.go | 49 +- .../pkg/result/processors/cgo.go | 37 +- .../pkg/result/processors/diff.go | 34 +- .../pkg/result/processors/exclude.go | 55 - .../pkg/result/processors/exclude_rules.go | 105 - ....go => exclusion_generated_file_filter.go} | 28 +- .../pkg/result/processors/exclusion_paths.go | 118 + .../result/processors/exclusion_presets.go | 138 + .../pkg/result/processors/exclusion_rules.go | 164 + .../result/processors/filename_unadjuster.go | 27 +- .../pkg/result/processors/fixer.go | 372 +- .../result/processors/identifier_marker.go | 225 +- .../pkg/result/processors/invalid_issue.go | 3 + .../pkg/result/processors/max_from_linter.go | 6 +- .../processors/max_per_file_from_linter.go | 2 + .../pkg/result/processors/max_same_issues.go | 7 +- .../{nolint.go => nolint_filter.go} | 36 +- 
.../pkg/result/processors/path_absoluter.go | 44 + .../pkg/result/processors/path_prefixer.go | 36 - .../pkg/result/processors/path_prettifier.go | 31 +- .../pkg/result/processors/path_relativity.go | 60 + .../pkg/result/processors/path_shortener.go | 2 + .../pkg/result/processors/severity.go | 54 +- .../pkg/result/processors/skip_dirs.go | 32 +- .../pkg/result/processors/skip_files.go | 30 +- .../pkg/result/processors/sort_results.go | 13 +- .../pkg/result/processors/source_code.go | 7 + .../pkg/result/processors/uniq_by_line.go | 16 +- vendor/github.com/golangci/modinfo/.gitignore | 1 - .../github.com/golangci/modinfo/.golangci.yml | 157 - vendor/github.com/golangci/modinfo/LICENSE | 674 - vendor/github.com/golangci/modinfo/Makefile | 12 - vendor/github.com/golangci/modinfo/module.go | 157 - vendor/github.com/golangci/modinfo/readme.md | 73 - .../github.com/golangci/revgrep/.golangci.yml | 71 +- vendor/github.com/golangci/revgrep/README.md | 10 +- vendor/github.com/golangci/revgrep/issue.go | 37 + vendor/github.com/golangci/revgrep/patch.go | 195 + vendor/github.com/golangci/revgrep/revgrep.go | 311 +- .../google/gnostic-models/compiler/context.go | 2 +- .../gnostic-models/compiler/extensions.go | 2 +- .../google/gnostic-models/compiler/helpers.go | 2 +- .../google/gnostic-models/compiler/reader.go | 2 +- .../gnostic-models/jsonschema/models.go | 2 +- .../gnostic-models/jsonschema/reader.go | 2 +- .../gnostic-models/jsonschema/writer.go | 2 +- .../gnostic-models/openapiv2/OpenAPIv2.go | 80 +- .../gnostic-models/openapiv2/document.go | 2 +- .../gnostic-models/openapiv3/OpenAPIv3.go | 24 +- .../gnostic-models/openapiv3/document.go | 2 +- .../gostaticanalysis/comment/.tagpr | 35 + .../gostaticanalysis/comment/CHANGELOG.md | 34 + .../gostaticanalysis/comment/comment.go | 23 +- .../gostaticanalysis/comment/version.txt | 1 + .../gostaticanalysis/forcetypeassert/.tagpr | 35 + .../forcetypeassert/CHANGELOG.md | 19 + .../forcetypeassert/forcetypeassert.go | 29 +- .../forcetypeassert/version.txt | 1 + .../go-immutable-radix/v2/.gitignore | 24 + .../go-immutable-radix/v2/CHANGELOG.md | 27 + .../{hcl => go-immutable-radix/v2}/LICENSE | 303 +- .../hashicorp/go-immutable-radix/v2/README.md | 73 + .../hashicorp/go-immutable-radix/v2/edges.go | 21 + .../hashicorp/go-immutable-radix/v2/iradix.go | 679 + .../hashicorp/go-immutable-radix/v2/iter.go | 205 + .../hashicorp/go-immutable-radix/v2/node.go | 326 + .../go-immutable-radix/v2/path_iter.go | 59 + .../go-immutable-radix/v2/raw_iter.go | 78 + .../go-immutable-radix/v2/reverse_iter.go | 240 + .../hashicorp/golang-lru/v2/LICENSE | 364 + .../hashicorp/golang-lru/v2/internal/list.go | 142 + .../golang-lru/v2/simplelru/LICENSE_list} | 2 + .../hashicorp/golang-lru/v2/simplelru/lru.go | 177 + .../golang-lru/v2/simplelru/lru_interface.go | 46 + vendor/github.com/hashicorp/hcl/.gitignore | 9 - vendor/github.com/hashicorp/hcl/.travis.yml | 13 - vendor/github.com/hashicorp/hcl/Makefile | 18 - vendor/github.com/hashicorp/hcl/README.md | 125 - vendor/github.com/hashicorp/hcl/appveyor.yml | 19 - vendor/github.com/hashicorp/hcl/decoder.go | 729 - vendor/github.com/hashicorp/hcl/hcl.go | 11 - .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 - .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 - .../hashicorp/hcl/hcl/parser/error.go | 17 - .../hashicorp/hcl/hcl/parser/parser.go | 532 - .../hashicorp/hcl/hcl/printer/nodes.go | 789 - .../hashicorp/hcl/hcl/printer/printer.go | 66 - .../hashicorp/hcl/hcl/scanner/scanner.go | 652 - .../hashicorp/hcl/hcl/strconv/quote.go | 
241 - .../hashicorp/hcl/hcl/token/position.go | 46 - .../hashicorp/hcl/hcl/token/token.go | 219 - .../hashicorp/hcl/json/parser/flatten.go | 117 - .../hashicorp/hcl/json/parser/parser.go | 313 - .../hashicorp/hcl/json/scanner/scanner.go | 451 - .../hashicorp/hcl/json/token/position.go | 46 - .../hashicorp/hcl/json/token/token.go | 118 - vendor/github.com/hashicorp/hcl/lex.go | 38 - vendor/github.com/hashicorp/hcl/parse.go | 39 - .../github.com/jjti/go-spancheck/.gitignore | 2 + .../jjti/go-spancheck/.golangci.yml | 7 - vendor/github.com/jjti/go-spancheck/go.work | 2 +- .../github.com/jjti/go-spancheck/go.work.sum | 7 + .../github.com/jjti/go-spancheck/spancheck.go | 51 +- vendor/github.com/julz/importas/Makefile | 17 + vendor/github.com/julz/importas/analyzer.go | 16 +- vendor/github.com/julz/importas/config.go | 18 +- vendor/github.com/julz/importas/flags.go | 21 +- .../karamaru-alpha/copyloopvar/copyloopvar.go | 46 +- .../kisielk/errcheck/errcheck/errcheck.go | 6 +- .../kisielk/errcheck/errcheck/tags.go | 12 - .../kisielk/errcheck/errcheck/tags_compat.go | 13 - .../kkHAIKE/contextcheck/contextcheck.go | 8 + .../kyoh86/exportloopref/.golangci.yml | 4 - .../kyoh86/exportloopref/.goreleaser.yml | 51 - .../github.com/kyoh86/exportloopref/LICENSE | 21 - .../github.com/kyoh86/exportloopref/Makefile | 16 - .../github.com/kyoh86/exportloopref/README.md | 223 - .../kyoh86/exportloopref/exportloopref.go | 334 - vendor/github.com/ldez/exptostd/.gitignore | 2 + vendor/github.com/ldez/exptostd/.golangci.yml | 83 + vendor/github.com/ldez/exptostd/LICENSE | 190 + vendor/github.com/ldez/exptostd/Makefile | 15 + vendor/github.com/ldez/exptostd/exptostd.go | 475 + vendor/github.com/ldez/exptostd/readme.md | 116 + .../ldez/gomoddirectives/.golangci.yml | 72 +- .../github.com/ldez/gomoddirectives/LICENSE | 2 +- .../ldez/gomoddirectives/gomoddirectives.go | 174 +- .../github.com/ldez/gomoddirectives/module.go | 35 +- .../github.com/ldez/gomoddirectives/readme.md | 198 +- .../github.com/ldez/grignotin/goenv/goenv.go | 50 + .../github.com/ldez/grignotin/goenv/names.go | 276 + .../github.com/ldez/grignotin/gomod/gomod.go | 85 + .../github.com/ldez/tagliatelle/.golangci.yml | 85 +- .../github.com/ldez/tagliatelle/converter.go | 116 + vendor/github.com/ldez/tagliatelle/readme.md | 169 +- .../ldez/tagliatelle/tagliatelle.go | 236 +- vendor/github.com/ldez/usetesting/.gitignore | 2 + .../github.com/ldez/usetesting/.golangci.yml | 83 + vendor/github.com/ldez/usetesting/LICENSE | 190 + vendor/github.com/ldez/usetesting/Makefile | 15 + vendor/github.com/ldez/usetesting/readme.md | 209 + vendor/github.com/ldez/usetesting/report.go | 200 + .../github.com/ldez/usetesting/usetesting.go | 268 + .../magiconair/properties/.gitignore | 6 - .../magiconair/properties/CHANGELOG.md | 205 - .../magiconair/properties/LICENSE.md | 24 - .../magiconair/properties/README.md | 128 - .../magiconair/properties/decode.go | 289 - .../github.com/magiconair/properties/doc.go | 155 - .../magiconair/properties/integrate.go | 35 - .../github.com/magiconair/properties/lex.go | 395 - .../github.com/magiconair/properties/load.go | 293 - .../magiconair/properties/parser.go | 86 - .../magiconair/properties/properties.go | 848 - .../magiconair/properties/rangecheck.go | 31 - vendor/github.com/matoous/godox/.golangci.yml | 39 +- vendor/github.com/matoous/godox/.revive.toml | 1 + vendor/github.com/matoous/godox/Makefile | 20 + vendor/github.com/matoous/godox/godox.go | 43 +- .../mattn/go-colorable/colorable_appengine.go | 38 - 
.../mattn/go-colorable/colorable_others.go | 4 +- .../mattn/go-colorable/colorable_windows.go | 22 +- .../mgechev/revive/config/config.go | 40 +- .../mgechev/revive/formatter/checkstyle.go | 2 +- .../mgechev/revive/formatter/default.go | 4 + .../mgechev/revive/formatter/friendly.go | 42 +- .../mgechev/revive/formatter/plain.go | 8 +- .../mgechev/revive/formatter/sarif.go | 2 +- .../mgechev/revive/formatter/stylish.go | 14 +- .../mgechev/revive/formatter/unix.go | 8 +- .../revive/internal/astutils/ast_utils.go | 82 + .../mgechev/revive/internal/ifelse/args.go | 9 +- .../mgechev/revive/internal/ifelse/branch.go | 42 +- .../revive/internal/ifelse/branch_kind.go | 23 +- .../mgechev/revive/internal/ifelse/chain.go | 12 +- .../mgechev/revive/internal/ifelse/doc.go | 2 +- .../mgechev/revive/internal/ifelse/func.go | 6 +- .../mgechev/revive/internal/ifelse/rule.go | 112 +- .../mgechev/revive/internal/ifelse/target.go | 3 +- .../github.com/mgechev/revive/lint/config.go | 2 +- .../github.com/mgechev/revive/lint/failure.go | 62 +- vendor/github.com/mgechev/revive/lint/file.go | 48 +- .../mgechev/revive/lint/filefilter.go | 14 +- .../github.com/mgechev/revive/lint/linter.go | 39 +- .../github.com/mgechev/revive/lint/package.go | 114 +- vendor/github.com/mgechev/revive/lint/rule.go | 6 +- .../mgechev/revive/rule/add_constant.go | 136 +- .../mgechev/revive/rule/argument_limit.go | 77 +- .../github.com/mgechev/revive/rule/atomic.go | 2 +- .../mgechev/revive/rule/banned_characters.go | 31 +- .../mgechev/revive/rule/bare_return.go | 8 +- .../mgechev/revive/rule/blank_imports.go | 9 +- .../revive/rule/bool_literal_in_expr.go | 26 +- .../mgechev/revive/rule/call_to_gc.go | 2 +- .../revive/rule/cognitive_complexity.go | 43 +- .../mgechev/revive/rule/comment_spacings.go | 19 +- .../mgechev/revive/rule/comments_density.go | 19 +- .../mgechev/revive/rule/confusing_naming.go | 6 +- .../mgechev/revive/rule/confusing_results.go | 70 +- .../revive/rule/constant_logical_expr.go | 4 +- .../revive/rule/context_as_argument.go | 102 +- .../mgechev/revive/rule/context_keys_type.go | 4 +- .../mgechev/revive/rule/cyclomatic.go | 64 +- .../mgechev/revive/rule/datarace.go | 74 +- .../mgechev/revive/rule/deep_exit.go | 59 +- .../github.com/mgechev/revive/rule/defer.go | 47 +- .../mgechev/revive/rule/dot_imports.go | 27 +- .../mgechev/revive/rule/duplicated_imports.go | 4 +- .../mgechev/revive/rule/early_return.go | 37 +- .../mgechev/revive/rule/empty_block.go | 8 +- .../mgechev/revive/rule/empty_lines.go | 4 +- .../mgechev/revive/rule/enforce_map_style.go | 26 +- .../rule/enforce_repeated_arg_type_style.go | 83 +- .../revive/rule/enforce_slice_style.go | 23 +- .../mgechev/revive/rule/error_naming.go | 4 +- .../mgechev/revive/rule/error_return.go | 72 +- .../mgechev/revive/rule/error_strings.go | 20 +- .../github.com/mgechev/revive/rule/errorf.go | 4 +- .../mgechev/revive/rule/exported.go | 92 +- .../mgechev/revive/rule/file_header.go | 21 +- .../mgechev/revive/rule/file_length_limit.go | 27 +- .../mgechev/revive/rule/filename_format.go | 26 +- .../mgechev/revive/rule/flag_param.go | 79 +- .../mgechev/revive/rule/function_length.go | 156 +- .../revive/rule/function_result_limit.go | 94 +- .../mgechev/revive/rule/get_return.go | 51 +- .../mgechev/revive/rule/identical_branches.go | 6 +- .../mgechev/revive/rule/if_return.go | 2 +- .../revive/rule/import_alias_naming.go | 53 +- .../mgechev/revive/rule/import_shadowing.go | 8 +- .../mgechev/revive/rule/imports_blocklist.go | 21 +- .../revive/rule/increment_decrement.go | 4 +- 
.../mgechev/revive/rule/indent_error_flow.go | 25 +- .../mgechev/revive/rule/line_length_limit.go | 22 +- .../revive/rule/max_control_nesting.go | 27 +- .../mgechev/revive/rule/max_public_structs.go | 27 +- .../mgechev/revive/rule/modifies_param.go | 4 +- .../revive/rule/modifies_value_receiver.go | 174 +- .../mgechev/revive/rule/nested_structs.go | 2 +- .../revive/rule/optimize_operands_order.go | 4 +- .../mgechev/revive/rule/package_comments.go | 6 +- .../github.com/mgechev/revive/rule/range.go | 2 +- .../mgechev/revive/rule/range_val_address.go | 6 +- .../revive/rule/range_val_in_closure.go | 2 +- .../mgechev/revive/rule/receiver_naming.go | 146 +- .../revive/rule/redefines_builtin_id.go | 4 +- .../revive/rule/redundant_build_tag.go | 41 + .../revive/rule/redundant_import_alias.go | 4 +- .../revive/rule/redundant_test_main_exit.go | 79 + .../mgechev/revive/rule/string_format.go | 92 +- .../mgechev/revive/rule/struct_tag.go | 75 +- .../mgechev/revive/rule/superfluous_else.go | 23 +- .../mgechev/revive/rule/time_equal.go | 2 +- .../mgechev/revive/rule/time_naming.go | 4 +- .../revive/rule/unchecked_type_assertion.go | 24 +- .../revive/rule/unconditional_recursion.go | 71 +- .../mgechev/revive/rule/unexported_naming.go | 2 +- .../mgechev/revive/rule/unexported_return.go | 95 +- .../mgechev/revive/rule/unhandled_error.go | 24 +- .../mgechev/revive/rule/unnecessary_stmt.go | 2 +- .../mgechev/revive/rule/unreachable_code.go | 3 +- .../mgechev/revive/rule/unused_param.go | 59 +- .../mgechev/revive/rule/unused_receiver.go | 124 +- .../github.com/mgechev/revive/rule/use_any.go | 4 +- .../mgechev/revive/rule/use_errors_new.go | 60 + .../github.com/mgechev/revive/rule/utils.go | 94 +- .../mgechev/revive/rule/var_declarations.go | 43 +- .../mgechev/revive/rule/var_naming.go | 60 +- .../mgechev/revive/rule/waitgroup_by_value.go | 2 +- .../mitchellh/mapstructure/CHANGELOG.md | 96 - .../github.com/mitchellh/mapstructure/LICENSE | 21 - .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 279 - .../mitchellh/mapstructure/error.go | 50 - .../mitchellh/mapstructure/mapstructure.go | 1540 -- .../modern-go/reflect2/safe_type.go | 22 +- .../nunnatsa/ginkgolinter/README.md | 40 +- .../nunnatsa/ginkgolinter/analyzer.go | 26 +- .../github.com/nunnatsa/ginkgolinter/doc.go | 2 +- .../internal/expression/actual/actual.go | 11 +- .../internal/expression/actual/actualarg.go | 10 +- .../internal/expression/expression.go | 24 +- .../internal/expression/matcher/matcher.go | 6 +- .../internal/expression/value/value.go | 4 + .../internal/ginkgohandler/handling.go | 4 +- .../internal/ginkgoinfo/ginkgoinfo.go | 26 + .../internal/gomegahandler/dothandler.go | 48 +- .../internal/gomegahandler/handler.go | 10 +- .../internal/gomegahandler/namedhandler.go | 52 +- .../internal/gomegainfo/gomegainfo.go | 34 +- .../internal/rules/asyncfunccallrule.go | 4 +- .../internal/rules/asynctimeintervalsrule.go | 2 +- .../internal/rules/equaldifferenttypesrule.go | 2 +- .../internal/rules/equalnilrule.go | 2 +- .../ginkgolinter/internal/rules/havelen0.go | 2 +- .../internal/rules/haveoccurredrule.go | 2 +- .../internal/rules/nilcomparerule.go | 4 +- .../internal/rules/succeedrule.go | 2 +- .../nunnatsa/ginkgolinter/types/boolean.go | 32 - .../nunnatsa/ginkgolinter/types/config.go | 26 +- .../openshift/api/.ci-operator.yaml | 2 +- .../openshift/api/.golangci.go-validated.yaml | 58 + .../github.com/openshift/api/.golangci.yaml | 32 +- vendor/github.com/openshift/api/AGENTS.md | 185 + 
.../github.com/openshift/api/Dockerfile.ocp | 4 +- vendor/github.com/openshift/api/Makefile | 6 +- vendor/github.com/openshift/api/OWNERS | 2 +- .../openshift/api/config/v1/register.go | 2 + .../api/config/v1/types_apiserver.go | 5 +- .../api/config/v1/types_authentication.go | 24 +- .../api/config/v1/types_cluster_operator.go | 23 +- .../api/config/v1/types_cluster_version.go | 2 +- .../api/config/v1/types_infrastructure.go | 38 +- .../openshift/api/config/v1/types_insights.go | 230 + ...sion-operator_01_clusteroperators.crd.yaml | 7 +- ...1_clusterversions-CustomNoUpgrade.crd.yaml | 2 +- ...erator_01_clusterversions-Default.crd.yaml | 2 +- ...usterversions-DevPreviewNoUpgrade.crd.yaml | 2 +- ...sterversions-TechPreviewNoUpgrade.crd.yaml | 2 +- ...tor_01_apiservers-CustomNoUpgrade.crd.yaml | 5 +- ...ig-operator_01_apiservers-Default.crd.yaml | 5 +- ...01_apiservers-DevPreviewNoUpgrade.crd.yaml | 5 +- ...1_apiservers-TechPreviewNoUpgrade.crd.yaml | 5 +- ...ations-Hypershift-CustomNoUpgrade.crd.yaml | 857 - ...uthentications-Hypershift-Default.crd.yaml | 706 - ...ns-Hypershift-DevPreviewNoUpgrade.crd.yaml | 857 - ...s-Hypershift-TechPreviewNoUpgrade.crd.yaml | 857 - ...ons-SelfManagedHA-CustomNoUpgrade.crd.yaml | 857 - ...entications-SelfManagedHA-Default.crd.yaml | 187 - ...elfManagedHA-TechPreviewNoUpgrade.crd.yaml | 857 - ...nfig-operator_01_authentications.crd.yaml} | 31 +- ...r_01_clusterimagepolicies-Default.crd.yaml | 415 + ...operator_01_imagepolicies-Default.crd.yaml | 416 + ...1_infrastructures-CustomNoUpgrade.crd.yaml | 114 +- ...erator_01_infrastructures-Default.crd.yaml | 107 +- ...frastructures-DevPreviewNoUpgrade.crd.yaml | 114 +- ...rastructures-TechPreviewNoUpgrade.crd.yaml | 114 +- ...sightsdatagathers-CustomNoUpgrade.crd.yaml | 233 + ...tsdatagathers-DevPreviewNoUpgrade.crd.yaml | 233 + ...sdatagathers-TechPreviewNoUpgrade.crd.yaml | 233 + ...hift-controller-manager_01_builds.crd.yaml | 44 +- .../api/config/v1/zz_generated.deepcopy.go | 209 + ..._generated.featuregated-crd-manifests.yaml | 30 +- .../v1/zz_generated.swagger_doc_generated.go | 119 +- .../v1alpha1/types_cluster_monitoring.go | 150 +- .../config/v1alpha1/zz_generated.deepcopy.go | 80 +- .../zz_generated.swagger_doc_generated.go | 30 +- .../console/v1/types_console_cli_download.go | 2 +- .../api/console/v1/types_console_link.go | 2 +- ..._generated.featuregated-crd-manifests.yaml | 6 +- vendor/github.com/openshift/api/features.md | 45 +- .../openshift/api/imageregistry/v1/types.go | 1 - ..._generated.featuregated-crd-manifests.yaml | 3 +- .../openshift/api/legacyconfig/v1/types.go | 2 +- .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../v1/types_controlplanemachineset.go | 2 +- .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../api/machine/v1beta1/types_awsprovider.go | 38 + .../v1beta1/types_machinehealthcheck.go | 1 + .../machine/v1beta1/zz_generated.deepcopy.go | 26 + .../zz_generated.swagger_doc_generated.go | 12 +- .../api/operator/v1/types_ingress.go | 11 +- .../api/operator/v1/types_kubeapiserver.go | 16 + .../operator/v1/types_machineconfiguration.go | 251 +- ...01_kubeapiservers-CustomNoUpgrade.crd.yaml | 349 + ...server_01_kubeapiservers-Default.crd.yaml} | 2 + ...ubeapiservers-DevPreviewNoUpgrade.crd.yaml | 349 + ...beapiservers-TechPreviewNoUpgrade.crd.yaml | 349 + .../0000_50_console_01_consoles.crd.yaml | 32 +- ...ineconfigurations-CustomNoUpgrade.crd.yaml | 1543 ++ ...01_machineconfigurations-Default.crd.yaml} | 13 +- ...onfigurations-DevPreviewNoUpgrade.crd.yaml | 1543 ++ 
...nfigurations-TechPreviewNoUpgrade.crd.yaml | 1298 ++ .../api/operator/v1/zz_generated.deepcopy.go | 91 + ..._generated.featuregated-crd-manifests.yaml | 11 +- .../v1/zz_generated.swagger_doc_generated.go | 78 +- .../config/v1/apiserver.go | 17 + .../config/v1/authentication.go | 17 + .../config/v1/azureplatformstatus.go | 19 +- .../applyconfigurations/config/v1/build.go | 17 + .../config/v1/clusterimagepolicy.go | 17 + .../config/v1/clusteroperator.go | 17 + .../config/v1/clusterversion.go | 17 + .../applyconfigurations/config/v1/console.go | 17 + .../applyconfigurations/config/v1/custom.go | 28 + .../applyconfigurations/config/v1/dns.go | 17 + .../config/v1/featuregate.go | 17 + .../config/v1/gatherconfig.go | 47 + .../config/v1/gathererconfig.go | 36 + .../config/v1/gatherers.go | 36 + .../applyconfigurations/config/v1/image.go | 17 + .../config/v1/imagecontentpolicy.go | 17 + .../config/v1/imagedigestmirrorset.go | 17 + .../config/v1/imagepolicy.go | 17 + .../config/v1/imagetagmirrorset.go | 17 + .../config/v1/infrastructure.go | 17 + .../applyconfigurations/config/v1/ingress.go | 17 + .../config/v1/insightsdatagather.go | 254 + .../config/v1/insightsdatagatherspec.go | 23 + .../applyconfigurations/config/v1/network.go | 17 + .../applyconfigurations/config/v1/node.go | 17 + .../applyconfigurations/config/v1/oauth.go | 17 + .../config/v1/operatorhub.go | 17 + .../v1/persistentvolumeclaimreference.go | 23 + .../config/v1/persistentvolumeconfig.go | 32 + .../applyconfigurations/config/v1/project.go | 17 + .../applyconfigurations/config/v1/proxy.go | 17 + .../config/v1/scheduler.go | 17 + .../applyconfigurations/config/v1/storage.go | 36 + .../config/v1alpha1/alertmanagerconfig.go | 36 + .../v1alpha1/alertmanagercustomconfig.go | 99 + .../config/v1alpha1/audit.go | 27 + .../config/v1alpha1/backup.go | 17 + .../config/v1alpha1/clusterimagepolicy.go | 17 + .../config/v1alpha1/clustermonitoring.go | 17 + .../config/v1alpha1/clustermonitoringspec.go | 20 +- .../config/v1alpha1/containerresource.go | 45 + .../config/v1alpha1/imagepolicy.go | 17 + .../config/v1alpha1/insightsdatagather.go | 17 + .../config/v1alpha1/metricsserverconfig.go | 88 + .../config/v1alpha2/insightsdatagather.go | 17 + .../applyconfigurations/internal/internal.go | 462 +- .../typed/config/v1/config_client.go | 5 + .../typed/config/v1/generated_expansion.go | 2 + .../typed/config/v1/insightsdatagather.go | 54 + .../config/v1/insightsdatagather.go | 85 + .../externalversions/config/v1/interface.go | 7 + .../informers/externalversions/generic.go | 2 + .../listers/config/v1/expansion_generated.go | 4 + .../listers/config/v1/insightsdatagather.go | 32 + .../applyconfigurations/internal/internal.go | 90 +- .../operator/v1/authentication.go | 17 + .../v1/bootimageskewenforcementconfig.go | 36 + .../v1/bootimageskewenforcementstatus.go | 45 + .../operator/v1/cloudcredential.go | 17 + .../operator/v1/clusterbootimageautomatic.go | 32 + .../operator/v1/clusterbootimagemanual.go | 45 + .../operator/v1/clustercsidriver.go | 17 + .../applyconfigurations/operator/v1/config.go | 17 + .../operator/v1/console.go | 17 + .../operator/v1/csisnapshotcontroller.go | 17 + .../applyconfigurations/operator/v1/dns.go | 17 + .../applyconfigurations/operator/v1/etcd.go | 17 + .../operator/v1/ingresscontroller.go | 17 + .../operator/v1/insightsoperator.go | 17 + .../v1/irreconcilablevalidationoverrides.go | 29 + .../operator/v1/kubeapiserver.go | 17 + .../operator/v1/kubeapiserverspec.go | 9 + .../operator/v1/kubecontrollermanager.go | 17 + 
.../operator/v1/kubescheduler.go | 17 + .../operator/v1/kubestorageversionmigrator.go | 17 + .../operator/v1/machineconfiguration.go | 17 + .../operator/v1/machineconfigurationspec.go | 22 +- .../operator/v1/machineconfigurationstatus.go | 17 +- .../operator/v1/network.go | 17 + .../applyconfigurations/operator/v1/olm.go | 17 + .../operator/v1/openshiftapiserver.go | 17 + .../operator/v1/openshiftcontrollermanager.go | 17 + .../operator/v1/serviceca.go | 17 + .../operator/v1/servicecatalogapiserver.go | 17 + .../v1/servicecatalogcontrollermanager.go | 17 + .../operator/v1/storage.go | 17 + .../testutils/Makefile | 4 +- .../clusteroperator/v1helpers/status.go | 2 +- .../client_cert_rotation_controller.go | 28 +- .../pkg/operator/certrotation/target.go | 5 + .../config_observer_controller.go | 4 +- .../resource/resourceapply/storage.go | 3 + .../resource/resourceread/networking.go | 2 +- .../pkg/operator/v1helpers/informers.go | 10 +- .../github.com/raeperd/recvcheck/.gitignore | 3 +- .../raeperd/recvcheck/.golangci.yml | 10 + vendor/github.com/raeperd/recvcheck/Makefile | 14 +- vendor/github.com/raeperd/recvcheck/README.md | 6 +- .../github.com/raeperd/recvcheck/analyzer.go | 122 +- .../go-internal}/diff/diff.go | 9 +- .../github.com/sagikazarmark/locafero/.envrc | 4 +- .../sagikazarmark/locafero/finder.go | 6 +- .../sagikazarmark/locafero/flake.lock | 303 +- .../sagikazarmark/locafero/flake.nix | 21 +- .../github.com/sagikazarmark/locafero/glob.go | 5 + .../sagikazarmark/locafero/glob_windows.go | 8 + .../github.com/sagikazarmark/slog-shim/.envrc | 4 - .../sagikazarmark/slog-shim/.gitignore | 4 - .../sagikazarmark/slog-shim/README.md | 81 - .../sagikazarmark/slog-shim/attr.go | 74 - .../sagikazarmark/slog-shim/attr_120.go | 75 - .../sagikazarmark/slog-shim/flake.lock | 273 - .../sagikazarmark/slog-shim/flake.nix | 57 - .../sagikazarmark/slog-shim/handler.go | 45 - .../sagikazarmark/slog-shim/handler_120.go | 45 - .../sagikazarmark/slog-shim/json_handler.go | 23 - .../slog-shim/json_handler_120.go | 24 - .../sagikazarmark/slog-shim/level.go | 61 - .../sagikazarmark/slog-shim/level_120.go | 61 - .../sagikazarmark/slog-shim/logger.go | 98 - .../sagikazarmark/slog-shim/logger_120.go | 99 - .../sagikazarmark/slog-shim/record.go | 31 - .../sagikazarmark/slog-shim/record_120.go | 32 - .../sagikazarmark/slog-shim/text_handler.go | 23 - .../slog-shim/text_handler_120.go | 24 - .../sagikazarmark/slog-shim/value.go | 109 - .../sagikazarmark/slog-shim/value_120.go | 110 - .../santhosh-tekuri/jsonschema/v5/.gitignore | 4 - .../santhosh-tekuri/jsonschema/v5/README.md | 220 - .../santhosh-tekuri/jsonschema/v5/compiler.go | 812 - .../santhosh-tekuri/jsonschema/v5/content.go | 29 - .../santhosh-tekuri/jsonschema/v5/doc.go | 49 - .../santhosh-tekuri/jsonschema/v5/draft.go | 1454 -- .../santhosh-tekuri/jsonschema/v5/errors.go | 129 - .../jsonschema/v5/extension.go | 116 - .../santhosh-tekuri/jsonschema/v5/format.go | 567 - .../jsonschema/v5/httploader/httploader.go | 38 - .../santhosh-tekuri/jsonschema/v5/loader.go | 60 - .../santhosh-tekuri/jsonschema/v5/output.go | 77 - .../santhosh-tekuri/jsonschema/v5/resource.go | 280 - .../santhosh-tekuri/jsonschema/v5/schema.go | 900 - .../jsonschema/{v5 => v6}/.gitmodules | 1 + .../jsonschema/v6/.golangci.yml | 5 + .../jsonschema/v6/.pre-commit-hooks.yaml | 7 + .../jsonschema/{v5 => v6}/LICENSE | 0 .../santhosh-tekuri/jsonschema/v6/README.md | 86 + .../santhosh-tekuri/jsonschema/v6/compiler.go | 332 + .../santhosh-tekuri/jsonschema/v6/content.go | 51 + 
.../santhosh-tekuri/jsonschema/v6/draft.go | 360 + .../santhosh-tekuri/jsonschema/v6/format.go | 708 + .../santhosh-tekuri/jsonschema/v6/go.work | 8 + .../jsonschema/v6/kind/kind.go | 651 + .../santhosh-tekuri/jsonschema/v6/loader.go | 266 + .../jsonschema/v6/metaschemas/draft-04/schema | 151 + .../jsonschema/v6/metaschemas/draft-06/schema | 150 + .../jsonschema/v6/metaschemas/draft-07/schema | 172 + .../metaschemas/draft/2019-09/meta/applicator | 55 + .../v6/metaschemas/draft/2019-09/meta/content | 15 + .../v6/metaschemas/draft/2019-09/meta/core | 56 + .../v6/metaschemas/draft/2019-09/meta/format | 13 + .../metaschemas/draft/2019-09/meta/meta-data | 35 + .../metaschemas/draft/2019-09/meta/validation | 97 + .../v6/metaschemas/draft/2019-09/schema | 41 + .../metaschemas/draft/2020-12/meta/applicator | 47 + .../v6/metaschemas/draft/2020-12/meta/content | 15 + .../v6/metaschemas/draft/2020-12/meta/core | 50 + .../draft/2020-12/meta/format-annotation | 13 + .../draft/2020-12/meta/format-assertion | 13 + .../metaschemas/draft/2020-12/meta/meta-data | 35 + .../draft/2020-12/meta/unevaluated | 14 + .../metaschemas/draft/2020-12/meta/validation | 97 + .../v6/metaschemas/draft/2020-12/schema | 57 + .../jsonschema/v6/objcompiler.go | 549 + .../santhosh-tekuri/jsonschema/v6/output.go | 212 + .../santhosh-tekuri/jsonschema/v6/position.go | 142 + .../santhosh-tekuri/jsonschema/v6/root.go | 202 + .../santhosh-tekuri/jsonschema/v6/roots.go | 289 + .../santhosh-tekuri/jsonschema/v6/schema.go | 248 + .../santhosh-tekuri/jsonschema/v6/util.go | 464 + .../jsonschema/v6/validator.go | 975 + .../santhosh-tekuri/jsonschema/v6/vocab.go | 106 + .../usestdlibvars/pkg/analyzer/analyzer.go | 35 +- .../securego/gosec/v2/.golangci.yml | 5 + .../securego/gosec/v2/.goreleaser.yml | 1 + .../securego/gosec/v2/CONTRIBUTING.md | 81 + vendor/github.com/securego/gosec/v2/README.md | 32 +- vendor/github.com/securego/gosec/v2/RULES.md | 61 + .../github.com/securego/gosec/v2/action.yml | 2 +- .../github.com/securego/gosec/v2/analyzer.go | 110 +- .../gosec/v2/analyzers/conversion_overflow.go | 75 +- .../{hardcodedNonce.go => hardcoded_nonce.go} | 7 +- .../securego/gosec/v2/rules/errors.go | 2 +- .../securego/gosec/v2/rules/fileperms.go | 2 +- .../gosec/v2/rules/implicit_aliasing.go | 2 +- .../shazow/go-diff/difflib/differ.go | 39 - .../slog-shim => spf13/afero}/.editorconfig | 8 +- vendor/github.com/spf13/afero/.golangci.yaml | 18 + vendor/github.com/spf13/afero/README.md | 2 +- vendor/github.com/spf13/afero/iofs.go | 1 - vendor/github.com/spf13/afero/memmap.go | 2 - vendor/github.com/spf13/cast/README.md | 2 +- vendor/github.com/spf13/cast/caste.go | 98 +- vendor/github.com/spf13/viper/.envrc | 4 +- vendor/github.com/spf13/viper/.golangci.yaml | 3 - vendor/github.com/spf13/viper/README.md | 21 +- vendor/github.com/spf13/viper/UPDATES.md | 126 + vendor/github.com/spf13/viper/encoding.go | 181 + vendor/github.com/spf13/viper/experimental.go | 8 + vendor/github.com/spf13/viper/file.go | 54 +- vendor/github.com/spf13/viper/file_finder.go | 38 - vendor/github.com/spf13/viper/finder.go | 55 + vendor/github.com/spf13/viper/flake.lock | 303 +- vendor/github.com/spf13/viper/flake.nix | 2 +- .../spf13/viper/internal/encoding/decoder.go | 61 - .../spf13/viper/internal/encoding/encoder.go | 60 - .../spf13/viper/internal/encoding/error.go | 7 - .../viper/internal/encoding/hcl/codec.go | 40 - .../viper/internal/encoding/ini/codec.go | 99 - .../viper/internal/encoding/ini/map_utils.go | 74 - .../internal/encoding/javaproperties/codec.go | 86 - 
.../encoding/javaproperties/map_utils.go | 74 - .../spf13/viper/internal/features/finder.go | 5 + .../viper/internal/features/finder_default.go | 5 + vendor/github.com/spf13/viper/logger.go | 39 +- vendor/github.com/spf13/viper/remote.go | 256 + vendor/github.com/spf13/viper/util.go | 11 +- vendor/github.com/spf13/viper/viper.go | 539 +- .../pkg/analyzer/analyzer.go | 12 +- .../tdakkota/asciicheck/asciicheck.go | 107 +- .../github.com/tetafro/godot/.goreleaser.yml | 2 + vendor/github.com/tetafro/godot/README.md | 1 + vendor/github.com/tetafro/godot/getters.go | 82 +- vendor/github.com/tetafro/godot/settings.go | 2 + .../bodyclose/passes/bodyclose/bodyclose.go | 62 +- .../wrapcheck/v2/wrapcheck/wrapcheck.go | 6 +- .../github.com/ultraware/funlen/.golangci.yml | 2 + vendor/github.com/ultraware/funlen/README.md | 20 + vendor/github.com/ultraware/funlen/funlen.go | 115 + vendor/github.com/ultraware/funlen/main.go | 124 - .../github.com/ultraware/whitespace/README.md | 2 +- .../ultraware/whitespace/whitespace.go | 149 +- vendor/github.com/uudashr/gocognit/README.md | 119 +- vendor/github.com/uudashr/gocognit/doc.go | 3 +- .../github.com/uudashr/gocognit/gocognit.go | 204 +- vendor/github.com/uudashr/gocognit/recv.go | 1 + .../uudashr/gocognit/recv_pre118.go | 1 + .../iface/internal/directive/directive.go | 2 +- .../github.com/uudashr/iface/opaque/opaque.go | 35 +- .../github.com/uudashr/iface/unused/unused.go | 46 +- vendor/go-simpler.org/sloglint/sloglint.go | 13 +- vendor/go.yaml.in/yaml/v3/LICENSE | 50 + .../yaml/v3/NOTICE} | 16 +- vendor/go.yaml.in/yaml/v3/README.md | 171 + vendor/go.yaml.in/yaml/v3/apic.go | 747 + vendor/go.yaml.in/yaml/v3/decode.go | 1018 + vendor/go.yaml.in/yaml/v3/emitterc.go | 2054 ++ vendor/go.yaml.in/yaml/v3/encode.go | 577 + vendor/go.yaml.in/yaml/v3/parserc.go | 1274 ++ vendor/go.yaml.in/yaml/v3/readerc.go | 434 + vendor/go.yaml.in/yaml/v3/resolve.go | 326 + vendor/go.yaml.in/yaml/v3/scannerc.go | 3040 +++ vendor/go.yaml.in/yaml/v3/sorter.go | 134 + vendor/go.yaml.in/yaml/v3/writerc.go | 48 + vendor/go.yaml.in/yaml/v3/yaml.go | 703 + vendor/go.yaml.in/yaml/v3/yamlh.go | 811 + vendor/go.yaml.in/yaml/v3/yamlprivateh.go | 198 + .../x/exp/constraints/constraints.go | 54 - vendor/golang.org/x/exp/maps/maps.go | 86 - vendor/golang.org/x/exp/slog/attr.go | 102 - vendor/golang.org/x/exp/slog/doc.go | 316 - vendor/golang.org/x/exp/slog/handler.go | 577 - .../x/exp/slog/internal/buffer/buffer.go | 84 - .../x/exp/slog/internal/ignorepc.go | 9 - vendor/golang.org/x/exp/slog/json_handler.go | 336 - vendor/golang.org/x/exp/slog/level.go | 201 - vendor/golang.org/x/exp/slog/logger.go | 343 - vendor/golang.org/x/exp/slog/noplog.bench | 36 - vendor/golang.org/x/exp/slog/record.go | 207 - vendor/golang.org/x/exp/slog/text_handler.go | 161 - vendor/golang.org/x/exp/slog/value.go | 456 - vendor/golang.org/x/exp/slog/value_119.go | 53 - vendor/golang.org/x/exp/slog/value_120.go | 39 - vendor/golang.org/x/net/http2/http2.go | 2 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 21 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 14 +- .../golang.org/x/sys/unix/affinity_linux.go | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 3 + .../golang.org/x/sys/unix/syscall_darwin.go | 56 +- .../golang.org/x/sys/unix/syscall_solaris.go | 2 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 44 +- .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + 
.../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/zsyscall_solaris_amd64.go | 8 +- .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 78 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 30 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 32 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_loong64.go | 28 +- .../x/sys/unix/ztypes_linux_mips.go | 30 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 32 +- .../x/sys/unix/ztypes_linux_ppc64.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 28 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 +- .../x/sys/unix/ztypes_linux_s390x.go | 28 +- .../x/sys/unix/ztypes_linux_sparc64.go | 28 +- .../golang.org/x/sys/windows/types_windows.go | 6 + .../x/sys/windows/zsyscall_windows.go | 966 +- vendor/golang.org/x/term/term_windows.go | 4 +- vendor/golang.org/x/term/terminal.go | 9 +- .../x/text/feature/plural/common.go | 70 + .../x/text/feature/plural/message.go | 244 + .../x/text/feature/plural/plural.go | 262 + .../x/text/feature/plural/tables.go | 552 + .../x/text/internal/catmsg/catmsg.go | 417 + .../x/text/internal/catmsg/codec.go | 407 + .../x/text/internal/catmsg/varint.go | 62 + .../x/text/internal/format/format.go | 41 + .../x/text/internal/format/parser.go | 358 + vendor/golang.org/x/text/internal/internal.go | 49 + vendor/golang.org/x/text/internal/match.go | 67 + .../x/text/internal/number/common.go | 55 + .../x/text/internal/number/decimal.go | 500 + .../x/text/internal/number/format.go | 533 + .../x/text/internal/number/number.go | 152 + .../x/text/internal/number/pattern.go | 485 + .../internal/number/roundingmode_string.go | 30 + .../x/text/internal/number/tables.go | 1219 ++ .../x/text/internal/stringset/set.go | 86 + vendor/golang.org/x/text/message/catalog.go | 36 + .../x/text/message/catalog/catalog.go | 365 + .../golang.org/x/text/message/catalog/dict.go | 129 + .../golang.org/x/text/message/catalog/go19.go | 15 + .../x/text/message/catalog/gopre19.go | 23 + vendor/golang.org/x/text/message/doc.go | 99 + vendor/golang.org/x/text/message/format.go | 510 + vendor/golang.org/x/text/message/message.go | 192 + vendor/golang.org/x/text/message/print.go | 984 + .../tools/go/analysis/passes/assign/assign.go | 95 +- 
.../analysis/passes/stdversion/stdversion.go | 135 + .../tools/go/analysis/passes/waitgroup/doc.go | 34 + .../go/analysis/passes/waitgroup/waitgroup.go | 91 + .../x/tools/go/ast/astutil/enclosing.go | 2 +- .../x/tools/go/ast/inspector/inspector.go | 1 + .../x/tools/go/ast/inspector/typeof.go | 3 - vendor/golang.org/x/tools/go/packages/doc.go | 2 + .../golang.org/x/tools/go/packages/golist.go | 18 +- .../x/tools/go/packages/golist_overlay.go | 2 +- vendor/golang.org/x/tools/go/ssa/builder.go | 2 +- .../tools/internal/imports/source_modindex.go | 47 +- .../x/tools/internal/modindex/directories.go | 148 +- .../x/tools/internal/modindex/index.go | 233 +- .../x/tools/internal/modindex/lookup.go | 16 +- .../x/tools/internal/modindex/modindex.go | 205 +- .../x/tools/internal/modindex/symbols.go | 69 +- .../x/tools/internal/modindex/types.go | 25 - .../internal/packagesinternal/packages.go | 6 + .../gomodules.xyz/jsonpatch/v2/jsonpatch.go | 17 +- .../honnef.co/go/tools/analysis/code/code.go | 4 +- vendor/honnef.co/go/tools/go/ir/builder.go | 6 +- vendor/honnef.co/go/tools/go/ir/emit.go | 2 +- .../go/tools/knowledge/deprecated.go | 12 +- vendor/honnef.co/go/tools/pattern/match.go | 3 +- .../honnef.co/go/tools/simple/s1008/s1008.go | 35 +- .../honnef.co/go/tools/simple/s1009/s1009.go | 35 +- .../go/tools/staticcheck/sa4023/sa4023.go | 2 +- .../go/tools/staticcheck/sa9007/sa9007.go | 2 +- vendor/honnef.co/go/tools/unused/unused.go | 44 +- .../v1beta1/generated.pb.go | 5038 +++-- .../v1beta1/generated.proto | 301 + .../admissionregistration/v1beta1/register.go | 4 + .../admissionregistration/v1beta1/types.go | 331 +- .../v1beta1/types_swagger_doc_generated.go | 95 + .../v1beta1/zz_generated.deepcopy.go | 254 +- .../zz_generated.prerelease-lifecycle.go | 72 + vendor/k8s.io/api/apps/v1/generated.proto | 2 +- vendor/k8s.io/api/apps/v1/types.go | 2 +- .../apps/v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/apps/v1beta1/generated.proto | 3 + vendor/k8s.io/api/apps/v1beta1/types.go | 4 + .../k8s.io/api/apps/v1beta2/generated.proto | 5 +- vendor/k8s.io/api/apps/v1beta2/types.go | 6 +- .../v1beta2/types_swagger_doc_generated.go | 2 +- .../api/authorization/v1/generated.proto | 6 - vendor/k8s.io/api/authorization/v1/types.go | 6 - .../v1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/autoscaling/v1/generated.proto | 3 + vendor/k8s.io/api/autoscaling/v1/types.go | 4 + vendor/k8s.io/api/batch/v1/generated.proto | 7 +- vendor/k8s.io/api/batch/v1/types.go | 7 +- .../batch/v1/types_swagger_doc_generated.go | 6 +- .../api/certificates/v1/generated.proto | 7 + vendor/k8s.io/api/certificates/v1/types.go | 7 + .../api/certificates/v1alpha1/generated.pb.go | 1511 +- .../api/certificates/v1alpha1/generated.proto | 205 + .../api/certificates/v1alpha1/register.go | 2 + .../k8s.io/api/certificates/v1alpha1/types.go | 231 + .../v1alpha1/types_swagger_doc_generated.go | 52 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/certificates/v1beta1/generated.proto | 7 + .../k8s.io/api/certificates/v1beta1/types.go | 7 + vendor/k8s.io/api/core/v1/generated.pb.go | 5809 ++++-- vendor/k8s.io/api/core/v1/generated.proto | 305 +- vendor/k8s.io/api/core/v1/types.go | 320 +- .../core/v1/types_swagger_doc_generated.go | 149 +- .../api/core/v1/zz_generated.deepcopy.go | 155 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 2 + .../api/extensions/v1beta1/generated.proto | 5 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 2 
+- .../v1beta1/zz_generated.validations.go | 78 + .../k8s.io/api/networking/v1/generated.proto | 7 +- vendor/k8s.io/api/networking/v1/types.go | 7 +- .../v1/types_swagger_doc_generated.go | 2 +- .../api/networking/v1alpha1/generated.pb.go | 1929 -- .../api/networking/v1alpha1/generated.proto | 142 - .../k8s.io/api/networking/v1alpha1/types.go | 154 - .../v1alpha1/types_swagger_doc_generated.go | 110 - .../networking/v1alpha1/well_known_labels.go | 33 - .../v1alpha1/zz_generated.deepcopy.go | 229 - .../zz_generated.prerelease-lifecycle.go | 94 - vendor/k8s.io/api/resource/v1/devicetaint.go | 35 + .../v1alpha1 => resource/v1}/doc.go | 9 +- vendor/k8s.io/api/resource/v1/generated.pb.go | 12777 ++++++++++++ vendor/k8s.io/api/resource/v1/generated.proto | 1589 ++ .../v1alpha1 => resource/v1}/register.go | 46 +- vendor/k8s.io/api/resource/v1/types.go | 1873 ++ .../v1/types_swagger_doc_generated.go | 510 + .../api/resource/v1/zz_generated.deepcopy.go | 1257 ++ .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../api/resource/v1alpha3/devicetaint.go | 35 + .../api/resource/v1alpha3/generated.pb.go | 11648 +---------- .../api/resource/v1alpha3/generated.proto | 1160 +- .../k8s.io/api/resource/v1alpha3/register.go | 8 - vendor/k8s.io/api/resource/v1alpha3/types.go | 1534 +- .../v1alpha3/types_swagger_doc_generated.go | 395 - .../v1alpha3/zz_generated.deepcopy.go | 1099 +- .../zz_generated.prerelease-lifecycle.go | 196 - .../api/resource/v1beta1/generated.pb.go | 2280 ++- .../api/resource/v1beta1/generated.proto | 313 +- vendor/k8s.io/api/resource/v1beta1/types.go | 331 +- .../v1beta1/types_swagger_doc_generated.go | 102 +- .../resource/v1beta1/zz_generated.deepcopy.go | 165 + .../api/resource/v1beta2/generated.pb.go | 2520 ++- .../api/resource/v1beta2/generated.proto | 313 +- vendor/k8s.io/api/resource/v1beta2/types.go | 331 +- .../v1beta2/types_swagger_doc_generated.go | 104 +- .../resource/v1beta2/zz_generated.deepcopy.go | 165 + vendor/k8s.io/api/storage/v1/generated.pb.go | 828 +- vendor/k8s.io/api/storage/v1/generated.proto | 47 +- vendor/k8s.io/api/storage/v1/register.go | 3 + vendor/k8s.io/api/storage/v1/types.go | 59 +- .../storage/v1/types_swagger_doc_generated.go | 27 +- .../api/storage/v1/zz_generated.deepcopy.go | 66 + .../v1/zz_generated.prerelease-lifecycle.go | 12 + vendor/k8s.io/api/storage/v1alpha1/types.go | 4 + .../zz_generated.prerelease-lifecycle.go | 12 + .../api/storage/v1beta1/generated.proto | 7 +- vendor/k8s.io/api/storage/v1beta1/types.go | 11 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../zz_generated.prerelease-lifecycle.go | 12 + .../pkg/apis/apiextensions/v1/conversion.go | 2 +- .../pkg/apis/apiextensions/v1/defaults.go | 4 +- .../apis/apiextensions/v1beta1/conversion.go | 2 +- .../apis/apiextensions/v1beta1/defaults.go | 6 +- .../v1/customresourcedefinition.go | 17 + .../v1beta1/customresourcedefinition.go | 17 + .../apimachinery/pkg/api/errors/errors.go | 2 +- .../pkg/api/operation/operation.go | 50 +- .../k8s.io/apimachinery/pkg/api/safe/safe.go | 59 + .../apimachinery/pkg/api/validate/README.md | 64 + .../apimachinery/pkg/api/validate/common.go | 28 + .../api/validate/constraints/constraints.go | 32 + .../pkg/api/validate/content/errors.go | 39 + .../apimachinery/pkg/api/validate/doc.go | 50 + .../apimachinery/pkg/api/validate/each.go | 171 + .../apimachinery/pkg/api/validate/enum.go | 40 + .../apimachinery/pkg/api/validate/equality.go | 38 + .../pkg/api/validate/immutable.go | 64 + .../apimachinery/pkg/api/validate/item.go | 72 + 
.../apimachinery/pkg/api/validate/limits.go | 37 + .../apimachinery/pkg/api/validate/required.go | 133 + .../apimachinery/pkg/api/validate/subfield.go | 46 + .../apimachinery/pkg/api/validate/testing.go | 35 + .../apimachinery/pkg/api/validate/union.go | 212 + .../pkg/api/validate/zeroorone.go | 54 + .../pkg/api/validation/objectmeta.go | 6 +- .../internalversion/validation/validation.go | 76 - .../apimachinery/pkg/apis/meta/v1/types.go | 16 +- .../pkg/apis/meta/v1/validation/validation.go | 4 +- .../k8s.io/apimachinery/pkg/labels/labels.go | 9 + .../apimachinery/pkg/labels/selector.go | 32 +- .../apimachinery/pkg/runtime/converter.go | 29 +- .../apimachinery/pkg/runtime/interfaces.go | 6 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 78 +- .../pkg/runtime/serializer/cbor/cbor.go | 6 - .../runtime/serializer/cbor/direct/direct.go | 22 +- .../serializer/cbor/internal/modes/custom.go | 422 - .../serializer/cbor/internal/modes/decode.go | 22 +- .../serializer/cbor/internal/modes/encode.go | 24 +- .../cbor/internal/modes/transcoding.go | 108 + .../k8s.io/apimachinery/pkg/util/diff/cmp.go | 31 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 142 +- .../apimachinery/pkg/util/diff/legacy_diff.go | 67 + .../apimachinery/pkg/util/errors/errors.go | 2 + .../pkg/util/managedfields/extract.go | 4 +- .../pkg/util/managedfields/fieldmanager.go | 2 +- .../pkg/util/managedfields/gvkparser.go | 4 +- .../managedfields/internal/capmanagers.go | 2 +- .../util/managedfields/internal/conflict.go | 4 +- .../managedfields/internal/fieldmanager.go | 2 +- .../pkg/util/managedfields/internal/fields.go | 2 +- .../internal/lastappliedmanager.go | 4 +- .../managedfields/internal/managedfields.go | 2 +- .../internal/managedfieldsupdater.go | 2 +- .../util/managedfields/internal/manager.go | 2 +- .../managedfields/internal/pathelement.go | 4 +- .../internal/runtimetypeconverter.go | 62 + .../managedfields/internal/skipnonapplied.go | 3 + .../util/managedfields/internal/stripmeta.go | 2 +- .../managedfields/internal/structuredmerge.go | 6 +- .../managedfields/internal/typeconverter.go | 6 +- .../internal/versionconverter.go | 6 +- .../pkg/util/managedfields/scalehandler.go | 2 +- .../pkg/util/managedfields/typeconverter.go | 9 + .../apimachinery/pkg/util/runtime/runtime.go | 25 +- .../util/validation/field/error_matcher.go | 14 +- .../pkg/util/validation/field/errors.go | 78 +- .../pkg/util/validation/validation.go | 2 +- .../apimachinery/pkg/util/yaml/decoder.go | 4 +- .../v1/mutatingwebhookconfiguration.go | 17 + .../v1/validatingadmissionpolicy.go | 17 + .../v1/validatingadmissionpolicybinding.go | 17 + .../v1/validatingwebhookconfiguration.go | 17 + .../v1alpha1/mutatingadmissionpolicy.go | 17 + .../mutatingadmissionpolicybinding.go | 17 + .../v1alpha1/validatingadmissionpolicy.go | 17 + .../validatingadmissionpolicybinding.go | 17 + .../v1beta1/applyconfiguration.go | 39 + .../v1beta1/jsonpatch.go | 39 + .../v1beta1/mutatingadmissionpolicy.go | 270 + .../v1beta1/mutatingadmissionpolicybinding.go | 270 + .../mutatingadmissionpolicybindingspec.go | 57 + .../v1beta1/mutatingadmissionpolicyspec.go | 113 + .../v1beta1/mutatingwebhook.go | 27 +- .../v1beta1/mutatingwebhookconfiguration.go | 17 + .../admissionregistration/v1beta1/mutation.go | 61 + .../v1beta1/validatingadmissionpolicy.go | 17 + .../validatingadmissionpolicybinding.go | 17 + .../v1beta1/validatingwebhookconfiguration.go | 17 + .../v1alpha1/storageversion.go | 17 + .../apps/v1/controllerrevision.go | 17 + .../applyconfigurations/apps/v1/daemonset.go | 17 
+ .../applyconfigurations/apps/v1/deployment.go | 17 + .../applyconfigurations/apps/v1/replicaset.go | 17 + .../apps/v1/statefulset.go | 17 + .../apps/v1beta1/controllerrevision.go | 17 + .../apps/v1beta1/deployment.go | 17 + .../apps/v1beta1/statefulset.go | 17 + .../apps/v1beta2/controllerrevision.go | 17 + .../apps/v1beta2/daemonset.go | 17 + .../apps/v1beta2/deployment.go | 17 + .../apps/v1beta2/replicaset.go | 17 + .../applyconfigurations/apps/v1beta2/scale.go | 17 + .../apps/v1beta2/statefulset.go | 17 + .../autoscaling/v1/horizontalpodautoscaler.go | 17 + .../autoscaling/v1/scale.go | 17 + .../autoscaling/v2/horizontalpodautoscaler.go | 17 + .../v2beta1/horizontalpodautoscaler.go | 17 + .../v2beta2/horizontalpodautoscaler.go | 17 + .../applyconfigurations/batch/v1/cronjob.go | 17 + .../applyconfigurations/batch/v1/job.go | 17 + .../batch/v1/jobtemplatespec.go | 6 + .../batch/v1beta1/cronjob.go | 17 + .../batch/v1beta1/jobtemplatespec.go | 6 + .../v1/certificatesigningrequest.go | 17 + .../v1alpha1/clustertrustbundle.go | 17 + .../v1alpha1/podcertificaterequest.go} | 109 +- .../v1alpha1/podcertificaterequestspec.go | 128 + .../v1alpha1/podcertificaterequeststatus.go | 85 + .../v1beta1/certificatesigningrequest.go | 17 + .../v1beta1/clustertrustbundle.go | 17 + .../coordination/v1/lease.go | 17 + .../coordination/v1alpha2/leasecandidate.go | 17 + .../coordination/v1beta1/lease.go | 17 + .../coordination/v1beta1/leasecandidate.go | 17 + .../core/v1/componentstatus.go | 17 + .../applyconfigurations/core/v1/configmap.go | 17 + .../applyconfigurations/core/v1/container.go | 14 + .../v1/containerextendedresourcerequest.go | 57 + .../core/v1/containerrestartrule.go | 52 + .../v1/containerrestartruleonexitcodes.go | 54 + .../applyconfigurations/core/v1/endpoints.go | 17 + .../core/v1/envvarsource.go | 9 + .../core/v1/ephemeralcontainer.go | 13 + .../core/v1/ephemeralcontainercommon.go | 14 + .../applyconfigurations/core/v1/event.go | 17 + .../core/v1/filekeyselector.go | 66 + .../applyconfigurations/core/v1/limitrange.go | 17 + .../applyconfigurations/core/v1/namespace.go | 17 + .../applyconfigurations/core/v1/node.go | 17 + .../core/v1/persistentvolume.go | 17 + .../core/v1/persistentvolumeclaim.go | 17 + .../core/v1/persistentvolumeclaimtemplate.go | 6 + .../applyconfigurations/core/v1/pod.go | 17 + .../core/v1/podcertificateprojection.go | 84 + .../core/v1/podextendedresourceclaimstatus.go | 53 + .../applyconfigurations/core/v1/podspec.go | 9 + .../applyconfigurations/core/v1/podstatus.go | 43 +- .../core/v1/podtemplate.go | 17 + .../core/v1/podtemplatespec.go | 6 + .../core/v1/replicationcontroller.go | 17 + .../core/v1/resourcequota.go | 17 + .../applyconfigurations/core/v1/secret.go | 17 + .../applyconfigurations/core/v1/service.go | 17 + .../core/v1/serviceaccount.go | 17 + .../core/v1/volumeprojection.go | 9 + .../discovery/v1/endpointslice.go | 17 + .../discovery/v1beta1/endpointslice.go | 17 + .../applyconfigurations/events/v1/event.go | 17 + .../events/v1beta1/event.go | 17 + .../extensions/v1beta1/daemonset.go | 17 + .../extensions/v1beta1/deployment.go | 17 + .../extensions/v1beta1/ingress.go | 17 + .../extensions/v1beta1/networkpolicy.go | 17 + .../extensions/v1beta1/replicaset.go | 17 + .../extensions/v1beta1/scale.go | 17 + .../flowcontrol/v1/flowschema.go | 17 + .../v1/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta1/flowschema.go | 17 + .../v1beta1/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta2/flowschema.go | 17 + 
.../v1beta2/prioritylevelconfiguration.go | 17 + .../flowcontrol/v1beta3/flowschema.go | 17 + .../v1beta3/prioritylevelconfiguration.go | 17 + .../applyconfigurations/internal/internal.go | 1057 +- .../meta/v1/deleteoptions.go | 11 + .../applyconfigurations/meta/v1/objectmeta.go | 5 + .../applyconfigurations/meta/v1/typemeta.go | 10 + .../meta/v1/unstructured.go | 2 +- .../networking/v1/ingress.go | 17 + .../networking/v1/ingressclass.go | 17 + .../networking/v1/ipaddress.go | 17 + .../networking/v1/networkpolicy.go | 17 + .../networking/v1/servicecidr.go | 17 + .../networking/v1alpha1/ipaddress.go | 253 - .../networking/v1alpha1/ipaddressspec.go | 39 - .../networking/v1alpha1/parentreference.go | 66 - .../networking/v1alpha1/servicecidrspec.go | 41 - .../networking/v1alpha1/servicecidrstatus.go | 48 - .../networking/v1beta1/ingress.go | 17 + .../networking/v1beta1/ingressclass.go | 17 + .../networking/v1beta1/ipaddress.go | 17 + .../networking/v1beta1/servicecidr.go | 17 + .../node/v1/runtimeclass.go | 17 + .../node/v1alpha1/runtimeclass.go | 17 + .../node/v1beta1/runtimeclass.go | 17 + .../applyconfigurations/policy/v1/eviction.go | 17 + .../policy/v1/poddisruptionbudget.go | 17 + .../policy/v1beta1/eviction.go | 17 + .../policy/v1beta1/poddisruptionbudget.go | 17 + .../rbac/v1/clusterrole.go | 17 + .../rbac/v1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1/role.go | 17 + .../rbac/v1/rolebinding.go | 17 + .../rbac/v1alpha1/clusterrole.go | 17 + .../rbac/v1alpha1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1alpha1/role.go | 17 + .../rbac/v1alpha1/rolebinding.go | 17 + .../rbac/v1beta1/clusterrole.go | 17 + .../rbac/v1beta1/clusterrolebinding.go | 17 + .../applyconfigurations/rbac/v1beta1/role.go | 17 + .../rbac/v1beta1/rolebinding.go | 17 + .../{v1alpha3 => v1}/allocateddevicestatus.go | 17 +- .../{v1alpha3 => v1}/allocationresult.go | 20 +- .../resource/v1/capacityrequestpolicy.go | 63 + .../resource/v1/capacityrequestpolicyrange.go | 61 + .../resource/v1/capacityrequirements.go | 50 + .../resource/v1/celdeviceselector.go | 39 + .../resource/{v1alpha3 => v1}/counter.go | 2 +- .../resource/{v1alpha3 => v1}/counterset.go | 2 +- .../applyconfigurations/resource/v1/device.go | 169 + .../deviceallocationconfiguration.go | 10 +- .../deviceallocationresult.go | 2 +- .../{v1alpha3 => v1}/deviceattribute.go | 2 +- .../resource/v1/devicecapacity.go | 52 + .../resource/{v1alpha3 => v1}/deviceclaim.go | 2 +- .../deviceclaimconfiguration.go | 2 +- .../resource/{v1alpha3 => v1}/deviceclass.go | 51 +- .../deviceclassconfiguration.go | 2 +- .../{v1alpha3 => v1}/deviceclassspec.go | 15 +- .../{v1alpha3 => v1}/deviceconfiguration.go | 2 +- .../{v1alpha3 => v1}/deviceconstraint.go | 19 +- .../devicecounterconsumption.go | 2 +- .../resource/v1/devicerequest.go | 62 + .../devicerequestallocationresult.go | 66 +- .../resource/v1/deviceselector.go | 39 + .../{v1alpha3 => v1}/devicesubrequest.go | 27 +- .../resource/v1/devicetaint.go | 71 + .../{v1alpha3 => v1}/devicetoleration.go | 18 +- .../exactdevicerequest.go} | 70 +- .../{v1alpha3 => v1}/networkdevicedata.go | 2 +- .../opaquedeviceconfiguration.go | 2 +- .../{v1alpha3 => v1}/resourceclaim.go | 53 +- .../resourceclaimconsumerreference.go | 2 +- .../{v1alpha3 => v1}/resourceclaimspec.go | 2 +- .../{v1alpha3 => v1}/resourceclaimstatus.go | 2 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 51 +- .../resourceclaimtemplatespec.go | 24 +- .../resource/{v1alpha3 => v1}/resourcepool.go | 2 +- .../{v1alpha3 => v1}/resourceslice.go | 
51 +- .../{v1alpha3 => v1}/resourceslicespec.go | 22 +- .../resource/v1alpha3/basicdevice.go | 121 - .../resource/v1alpha3/device.go | 48 - .../resource/v1alpha3/devicetaintrule.go | 17 + .../resource/v1beta1/allocateddevicestatus.go | 9 + .../resource/v1beta1/allocationresult.go | 14 +- .../resource/v1beta1/basicdevice.go | 54 +- .../resource/v1beta1/capacityrequestpolicy.go | 63 + .../v1beta1/capacityrequestpolicyrange.go | 61 + .../resource/v1beta1/capacityrequirements.go | 50 + .../resource/v1beta1/devicecapacity.go | 11 +- .../resource/v1beta1/deviceclass.go | 17 + .../resource/v1beta1/deviceclassspec.go | 13 +- .../resource/v1beta1/deviceconstraint.go | 13 +- .../resource/v1beta1/devicerequest.go | 25 +- .../v1beta1/devicerequestallocationresult.go | 64 +- .../resource/v1beta1/devicesubrequest.go | 21 +- .../resource/v1beta1/resourceclaim.go | 17 + .../resource/v1beta1/resourceclaimtemplate.go | 17 + .../v1beta1/resourceclaimtemplatespec.go | 6 + .../resource/v1beta1/resourceslice.go | 17 + .../resource/v1beta2/allocateddevicestatus.go | 9 + .../resource/v1beta2/allocationresult.go | 14 +- .../resource/v1beta2/capacityrequestpolicy.go | 63 + .../v1beta2/capacityrequestpolicyrange.go | 61 + .../resource/v1beta2/capacityrequirements.go | 50 + .../resource/v1beta2/device.go | 56 +- .../resource/v1beta2/devicecapacity.go | 11 +- .../resource/v1beta2/deviceclass.go | 17 + .../resource/v1beta2/deviceclassspec.go | 13 +- .../resource/v1beta2/deviceconstraint.go | 13 +- .../v1beta2/devicerequestallocationresult.go | 64 +- .../resource/v1beta2/devicesubrequest.go | 21 +- .../resource/v1beta2/exactdevicerequest.go | 21 +- .../resource/v1beta2/resourceclaim.go | 17 + .../resource/v1beta2/resourceclaimtemplate.go | 17 + .../v1beta2/resourceclaimtemplatespec.go | 6 + .../resource/v1beta2/resourceslice.go | 17 + .../scheduling/v1/priorityclass.go | 17 + .../scheduling/v1alpha1/priorityclass.go | 17 + .../scheduling/v1beta1/priorityclass.go | 17 + .../storage/v1/csidriver.go | 17 + .../applyconfigurations/storage/v1/csinode.go | 17 + .../storage/v1/csistoragecapacity.go | 17 + .../storage/v1/storageclass.go | 17 + .../storage/v1/volumeattachment.go | 17 + .../storage/v1/volumeattributesclass.go | 285 + .../storage/v1alpha1/csistoragecapacity.go | 17 + .../storage/v1alpha1/volumeattachment.go | 17 + .../storage/v1alpha1/volumeattributesclass.go | 17 + .../storage/v1beta1/csidriver.go | 17 + .../storage/v1beta1/csinode.go | 17 + .../storage/v1beta1/csistoragecapacity.go | 17 + .../storage/v1beta1/storageclass.go | 17 + .../storage/v1beta1/volumeattachment.go | 17 + .../storage/v1beta1/volumeattributesclass.go | 17 + .../v1alpha1/storageversionmigration.go | 17 + vendor/k8s.io/client-go/dynamic/simple.go | 46 +- .../client-go/features/known_features.go | 31 +- vendor/k8s.io/client-go/gentype/type.go | 39 - .../v1beta1/interface.go | 14 + .../v1beta1/mutatingadmissionpolicy.go | 101 + .../v1beta1/mutatingadmissionpolicybinding.go | 101 + .../certificates/v1alpha1/interface.go | 7 + .../v1alpha1/podcertificaterequest.go | 102 + vendor/k8s.io/client-go/informers/generic.go | 34 +- .../informers/networking/interface.go | 8 - .../networking/v1alpha1/interface.go | 52 - .../networking/v1alpha1/ipaddress.go | 101 - .../networking/v1alpha1/servicecidr.go | 101 - .../client-go/informers/resource/interface.go | 8 + .../resource/{v1alpha3 => v1}/deviceclass.go | 34 +- .../informers/resource/v1/interface.go | 66 + .../{v1alpha3 => v1}/resourceclaim.go | 34 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 34 +- 
.../{v1alpha3 => v1}/resourceslice.go | 34 +- .../informers/resource/v1alpha3/interface.go | 28 - .../informers/storage/v1/interface.go | 7 + .../storage/v1/volumeattributesclass.go | 101 + .../k8s.io/client-go/kubernetes/clientset.go | 26 +- .../client-go/kubernetes/scheme/register.go | 4 +- .../v1beta1/admissionregistration_client.go | 10 + .../v1beta1/generated_expansion.go | 4 + .../v1beta1/mutatingadmissionpolicy.go | 75 + .../v1beta1/mutatingadmissionpolicybinding.go | 75 + .../v1alpha1/certificates_client.go | 5 + .../v1alpha1/generated_expansion.go | 2 + .../v1alpha1/podcertificaterequest.go | 79 + .../typed/networking/v1alpha1/ipaddress.go | 71 - .../typed/networking/v1alpha1/servicecidr.go | 75 - .../typed/resource/v1/deviceclass.go | 71 + .../v1alpha1 => resource/v1}/doc.go | 2 +- .../v1}/generated_expansion.go | 10 +- .../v1/resource_client.go} | 56 +- .../typed/resource/v1/resourceclaim.go | 75 + .../resource/v1/resourceclaimtemplate.go | 71 + .../typed/resource/v1/resourceslice.go | 71 + .../typed/resource/v1alpha3/deviceclass.go | 71 - .../resource/v1alpha3/generated_expansion.go | 8 - .../resource/v1alpha3/resource_client.go | 20 - .../typed/resource/v1alpha3/resourceclaim.go | 75 - .../v1alpha3/resourceclaimtemplate.go | 73 - .../typed/resource/v1alpha3/resourceslice.go | 71 - .../typed/storage/v1/generated_expansion.go | 2 + .../typed/storage/v1/storage_client.go | 5 + .../typed/storage/v1/volumeattributesclass.go | 71 + .../v1beta1/expansion_generated.go | 8 + .../v1beta1/mutatingadmissionpolicy.go | 48 + .../v1beta1/mutatingadmissionpolicybinding.go | 48 + .../v1alpha1/expansion_generated.go | 8 + .../v1alpha1/podcertificaterequest.go | 70 + .../listers/networking/v1alpha1/ipaddress.go | 48 - .../networking/v1alpha1/servicecidr.go | 48 - .../resource/{v1alpha3 => v1}/deviceclass.go | 12 +- .../resource/v1/expansion_generated.go | 43 + .../{v1alpha3 => v1}/resourceclaim.go | 18 +- .../{v1alpha3 => v1}/resourceclaimtemplate.go | 18 +- .../{v1alpha3 => v1}/resourceslice.go | 12 +- .../resource/v1alpha3/expansion_generated.go | 24 - .../listers/storage/v1/expansion_generated.go | 4 + .../storage/v1/volumeattributesclass.go | 48 + vendor/k8s.io/client-go/metadata/metadata.go | 39 - vendor/k8s.io/client-go/openapi/client.go | 4 + vendor/k8s.io/client-go/pkg/version/base.go | 8 +- vendor/k8s.io/client-go/rest/request.go | 211 +- vendor/k8s.io/client-go/testing/fixture.go | 79 +- .../client-go/tools/cache/controller.go | 16 +- .../client-go/tools/cache/delta_fifo.go | 5 + .../k8s.io/client-go/tools/cache/listers.go | 8 + .../k8s.io/client-go/tools/cache/listwatch.go | 4 + .../k8s.io/client-go/tools/cache/reflector.go | 109 +- .../client-go/tools/cache/shared_informer.go | 1 - vendor/k8s.io/client-go/tools/cache/store.go | 37 +- .../client-go/tools/cache/the_real_fifo.go | 10 +- .../client-go/tools/clientcmd/api/types.go | 17 +- .../client-go/tools/clientcmd/api/v1/types.go | 7 +- .../tools/leaderelection/leaderelection.go | 18 +- .../leaderelection/resourcelock/interface.go | 16 +- .../leaderelection/resourcelock/leaselock.go | 15 +- .../data_consistency_detector.go | 22 +- .../list_data_consistency_detector.go | 76 - .../watch_list_data_consistency_detector.go | 54 - vendor/k8s.io/client-go/util/retry/util.go | 2 +- .../client-go/util/watchlist/watch_list.go | 82 - vendor/k8s.io/client-go/util/workqueue/doc.go | 1 + .../k8s.io/client-go/util/workqueue/queue.go | 73 +- .../pkg/providers/v1/config/config.go | 26 + .../k8s.io/component-base/metrics/counter.go | 39 +- 
vendor/k8s.io/component-base/metrics/gauge.go | 48 +- .../component-base/metrics/histogram.go | 41 +- vendor/k8s.io/component-base/metrics/opts.go | 2 +- .../k8s.io/component-base/metrics/summary.go | 38 +- .../metrics/timing_histogram.go | 43 +- vendor/k8s.io/component-base/version/base.go | 10 +- .../pkg/features/kube_features.go | 19 +- .../pkg/apis/apiregistration/v1/defaults.go | 4 +- .../apis/apiregistration/v1beta1/defaults.go | 4 +- .../k8s.io/kube-openapi/pkg/common/common.go | 4 +- .../kube-openapi/pkg/schemaconv/openapi.go | 2 +- .../pkg/schemaconv/proto_models.go | 2 +- .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 2 +- .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/util/proto/document_v3.go | 2 +- vendor/k8s.io/utils/buffer/ring_growing.go | 122 +- .../k8s.io/utils/clock/testing/fake_clock.go | 16 +- vendor/k8s.io/utils/pointer/OWNERS | 10 - vendor/k8s.io/utils/pointer/README.md | 3 - vendor/k8s.io/utils/pointer/pointer.go | 249 - vendor/modules.txt | 422 +- .../structured-merge-diff/{v4 => v6}/LICENSE | 0 .../{v4 => v6}/fieldpath/doc.go | 0 .../{v4 => v6}/fieldpath/element.go | 73 +- .../{v4 => v6}/fieldpath/fromvalue.go | 2 +- .../{v4 => v6}/fieldpath/managers.go | 0 .../{v4 => v6}/fieldpath/path.go | 2 +- .../{v4 => v6}/fieldpath/pathelementmap.go | 2 +- .../{v4 => v6}/fieldpath/serialize-pe.go | 28 +- .../{v4 => v6}/fieldpath/serialize.go | 0 .../{v4 => v6}/fieldpath/set.go | 43 +- .../{v4 => v6}/merge/conflict.go | 2 +- .../{v4 => v6}/merge/update.go | 7 +- .../{v4 => v6}/schema/doc.go | 0 .../{v4 => v6}/schema/elements.go | 0 .../{v4 => v6}/schema/equals.go | 0 .../{v4 => v6}/schema/schemaschema.go | 0 .../{v4 => v6}/typed/compare.go | 6 +- .../{v4 => v6}/typed/doc.go | 0 .../{v4 => v6}/typed/helpers.go | 15 +- .../{v4 => v6}/typed/merge.go | 6 +- .../{v4 => v6}/typed/parser.go | 6 +- .../{v4 => v6}/typed/reconcile_schema.go | 4 +- .../{v4 => v6}/typed/remove.go | 6 +- .../{v4 => v6}/typed/tofieldset.go | 6 +- .../{v4 => v6}/typed/typed.go | 6 +- .../{v4 => v6}/typed/validate.go | 10 +- .../{v4 => v6}/value/allocator.go | 0 .../{v4 => v6}/value/doc.go | 0 .../{v4 => v6}/value/fields.go | 8 + .../{v4 => v6}/value/jsontagutil.go | 63 +- .../{v4 => v6}/value/list.go | 0 .../{v4 => v6}/value/listreflect.go | 0 .../{v4 => v6}/value/listunstructured.go | 0 .../{v4 => v6}/value/map.go | 0 .../{v4 => v6}/value/mapreflect.go | 0 .../{v4 => v6}/value/mapunstructured.go | 0 .../{v4 => v6}/value/reflectcache.go | 14 +- .../{v4 => v6}/value/scalar.go | 0 .../{v4 => v6}/value/structreflect.go | 0 .../{v4 => v6}/value/value.go | 19 +- .../{v4 => v6}/value/valuereflect.go | 0 .../{v4 => v6}/value/valueunstructured.go | 0 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 71 - .../yaml/goyaml.v2/yaml_aliases.go | 85 - 2297 files changed, 154315 insertions(+), 72683 deletions(-) create mode 100644 vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml create mode 100644 vendor/github.com/alingse/nilnesserr/.gitignore create mode 100644 vendor/github.com/alingse/nilnesserr/.golangci.yaml rename vendor/github.com/{shazow/go-diff => alingse/nilnesserr}/LICENSE (94%) create mode 100644 vendor/github.com/alingse/nilnesserr/README.md create mode 100644 vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go create mode 100644 vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go create mode 100644 vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go create mode 100644 vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go 
create mode 100644 vendor/github.com/alingse/nilnesserr/linter.go create mode 100644 vendor/github.com/alingse/nilnesserr/nilerr.go create mode 100644 vendor/github.com/alingse/nilnesserr/nilness.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDelegateMacVolumeOwnershipTask.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImageUsageReport.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateMacSystemIntegrityProtectionModificationTask.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteImageUsageReport.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockStatus.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlocks.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReportEntries.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReports.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacModificationTasks.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetActiveVpnTunnelStatus.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceConnectEndpoint.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPublicIpDnsNameOptions.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go create mode 100644 vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go delete mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/errors.go delete mode 100644 vendor/github.com/golangci/dupl/.travis.yml delete mode 100644 vendor/github.com/golangci/dupl/README.md rename vendor/github.com/golangci/dupl/{main.go => lib/lib.go} (51%) create mode 100644 vendor/github.com/golangci/dupl/printer/issuer.go delete mode 100644 vendor/github.com/golangci/gofmt/goimports/goimports.go delete mode 100644 vendor/github.com/golangci/gofmt/goimports/golangci.go delete mode 100644 vendor/github.com/golangci/gofmt/goimports/readme.md rename vendor/github.com/{sagikazarmark/slog-shim => golangci/golangci-lint/internal/x}/LICENSE (92%) create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh create mode 100644 
vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md create mode 100644 vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/base_rule.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/config/linters_exclusions.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/fsutils/basepath.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go rename vendor/github.com/golangci/golangci-lint/pkg/goanalysis/{runner_base.go => runner_checker.go} (52%) create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/analyzer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/LICENSE create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/config/config.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/parser.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/section.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard_list.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/commons.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/diff.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go rename vendor/github.com/golangci/golangci-lint/pkg/golinters/{exportloopref/exportloopref.go => exptostd/exptostd.go} (75%) delete mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go delete mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go delete mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go rename vendor/github.com/golangci/golangci-lint/pkg/result/processors/{autogenerated_exclude.go => exclusion_generated_file_filter.go} (82%) create mode 100644 
vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_paths.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_presets.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_rules.go rename vendor/github.com/golangci/golangci-lint/pkg/result/processors/{nolint.go => nolint_filter.go} (86%) create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_absoluter.go delete mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_relativity.go delete mode 100644 vendor/github.com/golangci/modinfo/.gitignore delete mode 100644 vendor/github.com/golangci/modinfo/.golangci.yml delete mode 100644 vendor/github.com/golangci/modinfo/LICENSE delete mode 100644 vendor/github.com/golangci/modinfo/Makefile delete mode 100644 vendor/github.com/golangci/modinfo/module.go delete mode 100644 vendor/github.com/golangci/modinfo/readme.md create mode 100644 vendor/github.com/golangci/revgrep/issue.go create mode 100644 vendor/github.com/golangci/revgrep/patch.go create mode 100644 vendor/github.com/gostaticanalysis/comment/.tagpr create mode 100644 vendor/github.com/gostaticanalysis/comment/CHANGELOG.md create mode 100644 vendor/github.com/gostaticanalysis/comment/version.txt create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/.tagpr create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/CHANGELOG.md create mode 100644 vendor/github.com/gostaticanalysis/forcetypeassert/version.txt create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md rename vendor/github.com/hashicorp/{hcl => go-immutable-radix/v2}/LICENSE (50%) create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/README.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/node.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/internal/list.go rename vendor/github.com/{golangci/gofmt/goimports/LICENSE => hashicorp/golang-lru/v2/simplelru/LICENSE_list} (97%) create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go delete mode 100644 vendor/github.com/hashicorp/hcl/.gitignore delete mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile delete mode 100644 vendor/github.com/hashicorp/hcl/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go delete mode 
100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go create mode 100644 vendor/github.com/julz/importas/Makefile delete mode 100644 vendor/github.com/kisielk/errcheck/errcheck/tags.go delete mode 100644 vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go delete mode 100644 vendor/github.com/kyoh86/exportloopref/.golangci.yml delete mode 100644 vendor/github.com/kyoh86/exportloopref/.goreleaser.yml delete mode 100644 vendor/github.com/kyoh86/exportloopref/LICENSE delete mode 100644 vendor/github.com/kyoh86/exportloopref/Makefile delete mode 100644 vendor/github.com/kyoh86/exportloopref/README.md delete mode 100644 vendor/github.com/kyoh86/exportloopref/exportloopref.go create mode 100644 vendor/github.com/ldez/exptostd/.gitignore create mode 100644 vendor/github.com/ldez/exptostd/.golangci.yml create mode 100644 vendor/github.com/ldez/exptostd/LICENSE create mode 100644 vendor/github.com/ldez/exptostd/Makefile create mode 100644 vendor/github.com/ldez/exptostd/exptostd.go create mode 100644 vendor/github.com/ldez/exptostd/readme.md create mode 100644 vendor/github.com/ldez/grignotin/goenv/goenv.go create mode 100644 vendor/github.com/ldez/grignotin/goenv/names.go create mode 100644 vendor/github.com/ldez/grignotin/gomod/gomod.go create mode 100644 vendor/github.com/ldez/tagliatelle/converter.go create mode 100644 vendor/github.com/ldez/usetesting/.gitignore create mode 100644 vendor/github.com/ldez/usetesting/.golangci.yml create mode 100644 vendor/github.com/ldez/usetesting/LICENSE create mode 100644 vendor/github.com/ldez/usetesting/Makefile create mode 100644 vendor/github.com/ldez/usetesting/readme.md create mode 100644 vendor/github.com/ldez/usetesting/report.go create mode 100644 vendor/github.com/ldez/usetesting/usetesting.go delete mode 100644 vendor/github.com/magiconair/properties/.gitignore delete mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md delete mode 100644 vendor/github.com/magiconair/properties/LICENSE.md delete mode 100644 vendor/github.com/magiconair/properties/README.md delete mode 100644 vendor/github.com/magiconair/properties/decode.go delete mode 100644 vendor/github.com/magiconair/properties/doc.go delete mode 100644 vendor/github.com/magiconair/properties/integrate.go delete mode 100644 vendor/github.com/magiconair/properties/lex.go delete mode 100644 vendor/github.com/magiconair/properties/load.go delete mode 100644 vendor/github.com/magiconair/properties/parser.go delete mode 100644 vendor/github.com/magiconair/properties/properties.go delete mode 100644 
vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 vendor/github.com/matoous/godox/Makefile delete mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mgechev/revive/internal/astutils/ast_utils.go create mode 100644 vendor/github.com/mgechev/revive/rule/redundant_build_tag.go create mode 100644 vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go create mode 100644 vendor/github.com/mgechev/revive/rule/use_errors_new.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go delete mode 100644 vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go create mode 100644 vendor/github.com/openshift/api/.golangci.go-validated.yaml create mode 100644 vendor/github.com/openshift/api/AGENTS.md create mode 100644 vendor/github.com/openshift/api/config/v1/types_insights.go delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-DevPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-TechPreviewNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-Default.crd.yaml delete mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/{0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml => 0000_10_config-operator_01_authentications.crd.yaml} (97%) create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-Default.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-Default.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml create mode 100644 
vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-CustomNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/{0000_20_kube-apiserver_01_kubeapiservers.crd.yaml => 0000_20_kube-apiserver_01_kubeapiservers-Default.crd.yaml} (99%) create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-TechPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/{0000_80_machine-config_01_machineconfigurations.crd.yaml => 0000_80_machine-config_01_machineconfigurations-Default.crd.yaml} (98%) create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagerconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/audit.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementconfig.go create mode 100644 
vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimageautomatic.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimagemanual.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/irreconcilablevalidationoverrides.go create mode 100644 vendor/github.com/raeperd/recvcheck/.golangci.yml rename vendor/github.com/{golangci/gofmt/gofmt/internal => rogpeppe/go-internal}/diff/diff.go (98%) create mode 100644 vendor/github.com/sagikazarmark/locafero/glob.go create mode 100644 vendor/github.com/sagikazarmark/locafero/glob_windows.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/.envrc delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/.gitignore delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/README.md delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/flake.lock delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/flake.nix delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/level.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/level_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/record.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/record_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/value.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/value_120.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go delete mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go rename vendor/github.com/santhosh-tekuri/jsonschema/{v5 => v6}/.gitmodules (91%) create mode 100644 
vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml rename vendor/github.com/santhosh-tekuri/jsonschema/{v5 => v6}/LICENSE (100%) create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go create 
mode 100644 vendor/github.com/securego/gosec/v2/CONTRIBUTING.md create mode 100644 vendor/github.com/securego/gosec/v2/RULES.md rename vendor/github.com/securego/gosec/v2/analyzers/{hardcodedNonce.go => hardcoded_nonce.go} (97%) delete mode 100644 vendor/github.com/shazow/go-diff/difflib/differ.go rename vendor/github.com/{sagikazarmark/slog-shim => spf13/afero}/.editorconfig (68%) create mode 100644 vendor/github.com/spf13/afero/.golangci.yaml create mode 100644 vendor/github.com/spf13/viper/UPDATES.md create mode 100644 vendor/github.com/spf13/viper/encoding.go create mode 100644 vendor/github.com/spf13/viper/experimental.go delete mode 100644 vendor/github.com/spf13/viper/file_finder.go create mode 100644 vendor/github.com/spf13/viper/finder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/decoder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/encoder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/error.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/ini/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go create mode 100644 vendor/github.com/spf13/viper/internal/features/finder.go create mode 100644 vendor/github.com/spf13/viper/internal/features/finder_default.go create mode 100644 vendor/github.com/spf13/viper/remote.go create mode 100644 vendor/github.com/ultraware/funlen/.golangci.yml create mode 100644 vendor/github.com/ultraware/funlen/funlen.go delete mode 100644 vendor/github.com/ultraware/funlen/main.go create mode 100644 vendor/go.yaml.in/yaml/v3/LICENSE rename vendor/{k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go => go.yaml.in/yaml/v3/NOTICE} (58%) create mode 100644 vendor/go.yaml.in/yaml/v3/README.md create mode 100644 vendor/go.yaml.in/yaml/v3/apic.go create mode 100644 vendor/go.yaml.in/yaml/v3/decode.go create mode 100644 vendor/go.yaml.in/yaml/v3/emitterc.go create mode 100644 vendor/go.yaml.in/yaml/v3/encode.go create mode 100644 vendor/go.yaml.in/yaml/v3/parserc.go create mode 100644 vendor/go.yaml.in/yaml/v3/readerc.go create mode 100644 vendor/go.yaml.in/yaml/v3/resolve.go create mode 100644 vendor/go.yaml.in/yaml/v3/scannerc.go create mode 100644 vendor/go.yaml.in/yaml/v3/sorter.go create mode 100644 vendor/go.yaml.in/yaml/v3/writerc.go create mode 100644 vendor/go.yaml.in/yaml/v3/yaml.go create mode 100644 vendor/go.yaml.in/yaml/v3/yamlh.go create mode 100644 vendor/go.yaml.in/yaml/v3/yamlprivateh.go delete mode 100644 vendor/golang.org/x/exp/constraints/constraints.go delete mode 100644 vendor/golang.org/x/exp/maps/maps.go delete mode 100644 vendor/golang.org/x/exp/slog/attr.go delete mode 100644 vendor/golang.org/x/exp/slog/doc.go delete mode 100644 vendor/golang.org/x/exp/slog/handler.go delete mode 100644 vendor/golang.org/x/exp/slog/internal/buffer/buffer.go delete mode 100644 vendor/golang.org/x/exp/slog/internal/ignorepc.go delete mode 100644 vendor/golang.org/x/exp/slog/json_handler.go delete mode 100644 vendor/golang.org/x/exp/slog/level.go delete mode 100644 vendor/golang.org/x/exp/slog/logger.go delete mode 100644 vendor/golang.org/x/exp/slog/noplog.bench delete mode 100644 vendor/golang.org/x/exp/slog/record.go delete mode 100644 
vendor/golang.org/x/exp/slog/text_handler.go delete mode 100644 vendor/golang.org/x/exp/slog/value.go delete mode 100644 vendor/golang.org/x/exp/slog/value_119.go delete mode 100644 vendor/golang.org/x/exp/slog/value_120.go delete mode 100644 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go create mode 100644 vendor/golang.org/x/text/feature/plural/common.go create mode 100644 vendor/golang.org/x/text/feature/plural/message.go create mode 100644 vendor/golang.org/x/text/feature/plural/plural.go create mode 100644 vendor/golang.org/x/text/feature/plural/tables.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/catmsg.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/codec.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/varint.go create mode 100644 vendor/golang.org/x/text/internal/format/format.go create mode 100644 vendor/golang.org/x/text/internal/format/parser.go create mode 100644 vendor/golang.org/x/text/internal/internal.go create mode 100644 vendor/golang.org/x/text/internal/match.go create mode 100644 vendor/golang.org/x/text/internal/number/common.go create mode 100644 vendor/golang.org/x/text/internal/number/decimal.go create mode 100644 vendor/golang.org/x/text/internal/number/format.go create mode 100644 vendor/golang.org/x/text/internal/number/number.go create mode 100644 vendor/golang.org/x/text/internal/number/pattern.go create mode 100644 vendor/golang.org/x/text/internal/number/roundingmode_string.go create mode 100644 vendor/golang.org/x/text/internal/number/tables.go create mode 100644 vendor/golang.org/x/text/internal/stringset/set.go create mode 100644 vendor/golang.org/x/text/message/catalog.go create mode 100644 vendor/golang.org/x/text/message/catalog/catalog.go create mode 100644 vendor/golang.org/x/text/message/catalog/dict.go create mode 100644 vendor/golang.org/x/text/message/catalog/go19.go create mode 100644 vendor/golang.org/x/text/message/catalog/gopre19.go create mode 100644 vendor/golang.org/x/text/message/doc.go create mode 100644 vendor/golang.org/x/text/message/format.go create mode 100644 vendor/golang.org/x/text/message/message.go create mode 100644 vendor/golang.org/x/text/message/print.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/resource/v1/devicetaint.go rename vendor/k8s.io/api/{networking/v1alpha1 => resource/v1}/doc.go (83%) create mode 100644 vendor/k8s.io/api/resource/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1/generated.proto rename vendor/k8s.io/api/{networking/v1alpha1 => resource/v1}/register.go (54%) create mode 100644 
vendor/k8s.io/api/resource/v1/types.go create mode 100644 vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/resource/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/devicetaint.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/safe/safe.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/README.md create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/common.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/constraints/constraints.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/each.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/enum.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/equality.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/immutable.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/item.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/limits.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/required.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/subfield.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/testing.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/union.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validate/zeroorone.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/cmp.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/legacy_diff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go rename vendor/k8s.io/client-go/applyconfigurations/{networking/v1alpha1/servicecidr.go => certificates/v1alpha1/podcertificaterequest.go} (62%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequestspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequeststatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/allocateddevicestatus.go (85%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/allocationresult.go (66%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/counter.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/counterset.go (99%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceallocationconfiguration.go (89%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceallocationresult.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceattribute.go (99%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclaim.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclaimconfiguration.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclass.go (86%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclassconfiguration.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceclassspec.go (73%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceconfiguration.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/deviceconstraint.go (69%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicecounterconsumption.go (99%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicerequestallocationresult.go (52%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicesubrequest.go (76%) create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/devicetoleration.go (82%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3/devicerequest.go => v1/exactdevicerequest.go} (52%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/networkdevicedata.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/opaquedeviceconfiguration.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaim.go (86%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimconsumerreference.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimspec.go (98%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimstatus.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (86%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceclaimtemplatespec.go (91%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourcepool.go (99%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceslice.go (86%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha3 => v1}/resourceslicespec.go (84%) delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1alpha1/podcertificaterequest.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go rename vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/deviceclass.go (71%) create mode 100644 vendor/k8s.io/client-go/informers/resource/v1/interface.go rename vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceclaim.go (71%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (70%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha3 => v1}/resourceslice.go (71%) create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/podcertificaterequest.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1/deviceclass.go rename vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1 => resource/v1}/doc.go (97%) rename vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1 => resource/v1}/generated_expansion.go (77%) rename vendor/k8s.io/client-go/kubernetes/typed/{networking/v1alpha1/networking_client.go => resource/v1/resource_client.go} (56%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1/resourceslice.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1alpha1/podcertificaterequest.go delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go rename vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/deviceclass.go (78%) create mode 100644 vendor/k8s.io/client-go/listers/resource/v1/expansion_generated.go rename vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceclaim.go (79%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceclaimtemplate.go (78%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha3 => v1}/resourceslice.go (77%) create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/volumeattributesclass.go delete mode 100644 vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go delete mode 100644 vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go delete mode 100644 vendor/k8s.io/client-go/util/watchlist/watch_list.go delete mode 100644 vendor/k8s.io/utils/pointer/OWNERS delete mode 100644 vendor/k8s.io/utils/pointer/README.md delete mode 100644 vendor/k8s.io/utils/pointer/pointer.go rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/LICENSE (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/doc.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/element.go (78%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/fromvalue.go (98%) rename 
vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/managers.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/path.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/pathelementmap.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/serialize-pe.go (84%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/serialize.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/fieldpath/set.go (95%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/merge/conflict.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/merge/update.go (99%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/schema/doc.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/schema/elements.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/schema/equals.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/schema/schemaschema.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/compare.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/doc.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/helpers.go (93%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/merge.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/parser.go (97%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/reconcile_schema.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/remove.go (97%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/tofieldset.go (96%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/typed.go (98%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/typed/validate.go (96%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/allocator.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/doc.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/fields.go (92%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/jsontagutil.go (57%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/list.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/listreflect.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/listunstructured.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/map.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/mapreflect.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/mapunstructured.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/reflectcache.go (97%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/scalar.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/structreflect.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/value.go (94%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/valuereflect.go (100%) rename vendor/sigs.k8s.io/structured-merge-diff/{v4 => v6}/value/valueunstructured.go (100%) delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md delete mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml_aliases.go diff --git a/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go index 
19948c454..e719155d9 100644 --- a/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go +++ b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go @@ -72,12 +72,11 @@ func isKnown(directive string) bool { return false } +// Found by running the following command on the source of go. +// git grep -o -E -h '//go:[a-z_-]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u +// See https://pkg.go.dev/cmd/compile@go1.24#hdr-Compiler_Directives var known = []string{ - // Found by running the following command on the source of go. - // git grep -o -E -h '//go:[a-z_]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u - "binary", "build", - "buildsomethingelse", "cgo_dynamic_linker", "cgo_export_dynamic", "cgo_export_static", @@ -85,10 +84,10 @@ var known = []string{ "cgo_import_static", "cgo_ldflag", "cgo_unsafe_args", + "debug", "embed", "generate", "linkname", - "name", "nocheckptr", "noescape", "noinline", @@ -101,5 +100,7 @@ var known = []string{ "systemstack", "uintptrescapes", "uintptrkeepalive", + "wasmimport", + "wasmexport", "yeswritebarrierrec", } diff --git a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go index edf9193ec..c17c6acca 100644 --- a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go +++ b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go @@ -1,7 +1,6 @@ package checknoglobals import ( - "flag" "fmt" "go/ast" "go/token" @@ -37,18 +36,10 @@ func Analyzer() *analysis.Analyzer { Name: "gochecknoglobals", Doc: Doc, Run: checkNoGlobals, - Flags: flags(), RunDespiteErrors: true, } } -func flags() flag.FlagSet { - flags := flag.NewFlagSet("", flag.ExitOnError) - flags.Bool("t", false, "Include tests") - - return *flags -} - func isAllowed(cm ast.CommentMap, v ast.Node, ti *types.Info) bool { switch i := v.(type) { case *ast.GenDecl: @@ -138,16 +129,11 @@ func hasEmbedComment(cm ast.CommentMap, n ast.Node) bool { } func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { - includeTests := pass.Analyzer.Flags.Lookup("t").Value.(flag.Getter).Get().(bool) - for _, file := range pass.Files { filename := pass.Fset.Position(file.Pos()).Filename if !strings.HasSuffix(filename, ".go") { continue } - if !includeTests && strings.HasSuffix(filename, "_test.go") { - continue - } fileCommentMap := ast.NewCommentMap(pass.Fset, file, file.Comments) diff --git a/vendor/github.com/4meepo/tagalign/.gitignore b/vendor/github.com/4meepo/tagalign/.gitignore index e37bb52e4..1c6218ee2 100644 --- a/vendor/github.com/4meepo/tagalign/.gitignore +++ b/vendor/github.com/4meepo/tagalign/.gitignore @@ -17,6 +17,7 @@ *.test .vscode +.idea/ # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/vendor/github.com/4meepo/tagalign/.goreleaser.yml index e7b6f6800..37dfec7c8 100644 --- a/vendor/github.com/4meepo/tagalign/.goreleaser.yml +++ b/vendor/github.com/4meepo/tagalign/.goreleaser.yml @@ -1,4 +1,4 @@ ---- +version: 2 project_name: tagalign release: @@ -29,4 +29,4 @@ builds: goarch: 386 - goos: freebsd goarch: arm64 - main: ./cmd/tagalign/ \ No newline at end of file + main: ./cmd/tagalign/ diff --git a/vendor/github.com/4meepo/tagalign/options.go b/vendor/github.com/4meepo/tagalign/options.go index ddec98da7..2a7859246 100644 --- a/vendor/github.com/4meepo/tagalign/options.go +++ 
b/vendor/github.com/4meepo/tagalign/options.go @@ -2,13 +2,6 @@ package tagalign type Option func(*Helper) -// WithMode specify the mode of tagalign. -func WithMode(mode Mode) Option { - return func(h *Helper) { - h.mode = mode - } -} - // WithSort enable tags sort. // fixedOrder specify the order of tags, the other tags will be sorted by name. // Sory is disabled by default. diff --git a/vendor/github.com/4meepo/tagalign/tagalign.go b/vendor/github.com/4meepo/tagalign/tagalign.go index 4734b5666..8161a0aa7 100644 --- a/vendor/github.com/4meepo/tagalign/tagalign.go +++ b/vendor/github.com/4meepo/tagalign/tagalign.go @@ -1,27 +1,19 @@ package tagalign import ( + "cmp" "fmt" "go/ast" "go/token" - "log" "reflect" - "sort" + "slices" "strconv" "strings" "github.com/fatih/structtag" - "golang.org/x/tools/go/analysis" ) -type Mode int - -const ( - StandaloneMode Mode = iota - GolangciLintMode -) - type Style int const ( @@ -44,11 +36,14 @@ func NewAnalyzer(options ...Option) *analysis.Analyzer { } } -func Run(pass *analysis.Pass, options ...Option) []Issue { - var issues []Issue +func Run(pass *analysis.Pass, options ...Option) { for _, f := range pass.Files { + filename := getFilename(pass.Fset, f) + if !strings.HasSuffix(filename, ".go") { + continue + } + h := &Helper{ - mode: StandaloneMode, style: DefaultStyle, align: true, } @@ -63,22 +58,19 @@ func Run(pass *analysis.Pass, options ...Option) []Issue { if !h.align && !h.sort { // do nothing - return nil + return } ast.Inspect(f, func(n ast.Node) bool { h.find(pass, n) return true }) + h.Process(pass) - issues = append(issues, h.issues...) } - return issues } type Helper struct { - mode Mode - style Style align bool // whether enable tags align. @@ -87,19 +79,6 @@ type Helper struct { singleFields []*ast.Field consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct. - issues []Issue -} - -// Issue is used to integrate with golangci-lint's inline auto fix. 
-type Issue struct { - Pos token.Position - Message string - InlineFix InlineFix -} -type InlineFix struct { - StartCol int // zero-based - Length int - NewString string } func (w *Helper) find(pass *analysis.Pass, n ast.Node) { @@ -159,42 +138,28 @@ func (w *Helper) find(pass *analysis.Pass, n ast.Node) { split() } -func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { - if w.mode == GolangciLintMode { - iss := Issue{ - Pos: pass.Fset.Position(field.Tag.Pos()), - Message: msg, - InlineFix: InlineFix{ - StartCol: startCol, - Length: len(field.Tag.Value), - NewString: replaceStr, - }, - } - w.issues = append(w.issues, iss) - } - - if w.mode == StandaloneMode { - pass.Report(analysis.Diagnostic{ - Pos: field.Tag.Pos(), - End: field.Tag.End(), - Message: msg, - SuggestedFixes: []analysis.SuggestedFix{ - { - Message: msg, - TextEdits: []analysis.TextEdit{ - { - Pos: field.Tag.Pos(), - End: field.Tag.End(), - NewText: []byte(replaceStr), - }, +func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr string) { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.Pos(), + End: field.Tag.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.Pos(), + End: field.Tag.End(), + NewText: []byte(replaceStr), }, }, }, - }) - } + }, + }) } -func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit +//nolint:gocognit,gocyclo,nestif +func (w *Helper) Process(pass *analysis.Pass) { // process grouped fields for _, fields := range w.consecutiveFieldsGroups { offsets := make([]int, len(fields)) @@ -220,7 +185,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tag, err := strconv.Unquote(field.Tag.Value) if err != nil { // if tag value is not a valid string, report it directly - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + w.report(pass, field, errTagValueSyntax, field.Tag.Value) fields = removeField(fields, i) continue } @@ -228,7 +193,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tags, err := structtag.Parse(tag) if err != nil { // if tag value is not a valid struct tag, report it directly - w.report(pass, field, column, err.Error(), field.Tag.Value) + w.report(pass, field, err.Error(), field.Tag.Value) fields = removeField(fields, i) continue } @@ -241,7 +206,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit cp[i] = tag } notSortedTagsGroup = append(notSortedTagsGroup, cp) - sortBy(w.fixedTagOrder, tags) + sortTags(w.fixedTagOrder, tags) } for _, t := range tags.Tags() { addKey(t.Key) @@ -252,7 +217,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit } if w.sort && StrictStyle == w.style { - sortAllKeys(w.fixedTagOrder, uniqueKeys) + sortKeys(w.fixedTagOrder, uniqueKeys) maxTagNum = len(uniqueKeys) } @@ -340,27 +305,26 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned, should be: " + unquoteTag - w.report(pass, field, offsets[i], msg, newTagValue) + w.report(pass, field, msg, newTagValue) } } // process single fields for _, field := range w.singleFields { - column := pass.Fset.Position(field.Tag.Pos()).Column - 1 tag, err := strconv.Unquote(field.Tag.Value) if err != nil { - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + w.report(pass, field, errTagValueSyntax, field.Tag.Value) continue } tags, err := structtag.Parse(tag) if err != nil { - w.report(pass, field, column, 
err.Error(), field.Tag.Value) + w.report(pass, field, err.Error(), field.Tag.Value) continue } originalTags := append([]*structtag.Tag(nil), tags.Tags()...) if w.sort { - sortBy(w.fixedTagOrder, tags) + sortTags(w.fixedTagOrder, tags) } newTagValue := fmt.Sprintf("`%s`", tags.String()) @@ -371,85 +335,47 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned , should be: " + tags.String() - w.report(pass, field, column, msg, newTagValue) + w.report(pass, field, msg, newTagValue) } } -// Issues returns all issues found by the analyzer. -// It is used to integrate with golangci-lint. -func (w *Helper) Issues() []Issue { - log.Println("tagalign 's Issues() should only be called in golangci-lint mode") - return w.issues -} - -// sortBy sorts tags by fixed order. +// sortTags sorts tags by fixed order. // If a tag is not in the fixed order, it will be sorted by name. -func sortBy(fixedOrder []string, tags *structtag.Tags) { - // sort by fixed order - sort.Slice(tags.Tags(), func(i, j int) bool { - ti := tags.Tags()[i] - tj := tags.Tags()[j] - - oi := findIndex(fixedOrder, ti.Key) - oj := findIndex(fixedOrder, tj.Key) - - if oi == -1 && oj == -1 { - return ti.Key < tj.Key - } - - if oi == -1 { - return false - } - - if oj == -1 { - return true - } - - return oi < oj +func sortTags(fixedOrder []string, tags *structtag.Tags) { + slices.SortFunc(tags.Tags(), func(a, b *structtag.Tag) int { + return compareByFixedOrder(fixedOrder)(a.Key, b.Key) }) } -func sortAllKeys(fixedOrder []string, keys []string) { - sort.Slice(keys, func(i, j int) bool { - oi := findIndex(fixedOrder, keys[i]) - oj := findIndex(fixedOrder, keys[j]) +func sortKeys(fixedOrder []string, keys []string) { + slices.SortFunc(keys, compareByFixedOrder(fixedOrder)) +} + +func compareByFixedOrder(fixedOrder []string) func(a, b string) int { + return func(a, b string) int { + oi := slices.Index(fixedOrder, a) + oj := slices.Index(fixedOrder, b) if oi == -1 && oj == -1 { - return keys[i] < keys[j] + return strings.Compare(a, b) } if oi == -1 { - return false + return 1 } if oj == -1 { - return true + return -1 } - return oi < oj - }) -} - -func findIndex(s []string, e string) int { - for i, a := range s { - if a == e { - return i - } + return cmp.Compare(oi, oj) } - return -1 } func alignFormat(length int) string { return "%" + fmt.Sprintf("-%ds", length) } -func max(a, b int) int { - if a > b { - return a - } - return b -} - func removeField(fields []*ast.Field, index int) []*ast.Field { if index < 0 || index >= len(fields) { return fields @@ -457,3 +383,12 @@ func removeField(fields []*ast.Field, index int) []*ast.Field { return append(fields[:index], fields[index+1:]...) 
} + +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename + } + + return filename +} diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go index 5507d9546..703cc1c39 100644 --- a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -125,7 +125,7 @@ const ( ) func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) { - switch v := t.(type) { + switch v := types.Unalias(t).(type) { case *types.Pointer: return n.checkedTypes.Contains(ptrType), zeroValueNil diff --git a/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go b/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go index 7b88bf56e..42c06f4e7 100644 --- a/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go +++ b/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go @@ -3,33 +3,57 @@ package analyzer import ( "bytes" "errors" + "flag" "fmt" "go/ast" "go/printer" "go/token" - "go/types" + "slices" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" ) -var Analyzer = &analysis.Analyzer{ - Name: "fatcontext", - Doc: "detects nested contexts in loops and function literals", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, +const FlagCheckStructPointers = "check-struct-pointers" + +func NewAnalyzer() *analysis.Analyzer { + r := &runner{} + + flags := flag.NewFlagSet("fatcontext", flag.ExitOnError) + flags.BoolVar(&r.DetectInStructPointers, FlagCheckStructPointers, false, + "set to true to detect potential fat contexts in struct pointers") + + return &analysis.Analyzer{ + Name: "fatcontext", + Doc: "detects nested contexts in loops and function literals", + Run: r.run, + Flags: *flags, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } } var errUnknown = errors.New("unknown node type") -func run(pass *analysis.Pass) (interface{}, error) { +const ( + categoryInLoop = "nested context in loop" + categoryInFuncLit = "nested context in function literal" + categoryInStructPointer = "potential nested context in struct pointer" + categoryUnsupported = "unsupported nested context type" +) + +type runner struct { + DetectInStructPointers bool +} + +func (r *runner) run(pass *analysis.Pass) (interface{}, error) { inspctr := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil), (*ast.FuncLit)(nil), + (*ast.FuncDecl)(nil), } inspctr.Preorder(nodeFilter, func(node ast.Node) { @@ -38,36 +62,26 @@ func run(pass *analysis.Pass) (interface{}, error) { return } + if body == nil { + return + } + assignStmt := findNestedContext(pass, node, body.List) if assignStmt == nil { return } - suggestedStmt := ast.AssignStmt{ - Lhs: assignStmt.Lhs, - TokPos: assignStmt.TokPos, - Tok: token.DEFINE, - Rhs: assignStmt.Rhs, - } - suggested, err := render(pass.Fset, &suggestedStmt) - - var fixes []analysis.SuggestedFix - if err == nil { - fixes = append(fixes, analysis.SuggestedFix{ - Message: "replace `=` with `:=`", - TextEdits: []analysis.TextEdit{ - { - Pos: assignStmt.Pos(), - End: assignStmt.End(), - NewText: suggested, - }, - }, - }) + category := getCategory(pass, node, assignStmt) + + if r.shouldIgnoreReport(category) { + return } + fixes := 
r.getSuggestedFixes(pass, assignStmt, category) + pass.Report(analysis.Diagnostic{ Pos: assignStmt.Pos(), - Message: getReportMessage(node), + Message: category, SuggestedFixes: fixes, }) }) @@ -75,31 +89,69 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func getReportMessage(node ast.Node) string { +func (r *runner) shouldIgnoreReport(category string) bool { + return category == categoryInStructPointer && !r.DetectInStructPointers +} + +func (r *runner) getSuggestedFixes(pass *analysis.Pass, assignStmt *ast.AssignStmt, category string) []analysis.SuggestedFix { + switch category { + case categoryInStructPointer, categoryUnsupported: + return nil + } + + suggestedStmt := ast.AssignStmt{ + Lhs: assignStmt.Lhs, + TokPos: assignStmt.TokPos, + Tok: token.DEFINE, + Rhs: assignStmt.Rhs, + } + suggested, err := render(pass.Fset, &suggestedStmt) + + var fixes []analysis.SuggestedFix + if err == nil { + fixes = append(fixes, analysis.SuggestedFix{ + Message: "replace `=` with `:=`", + TextEdits: []analysis.TextEdit{ + { + Pos: assignStmt.Pos(), + End: assignStmt.End(), + NewText: suggested, + }, + }, + }) + } + + return fixes +} + +func getCategory(pass *analysis.Pass, node ast.Node, assignStmt *ast.AssignStmt) string { switch node.(type) { case *ast.ForStmt, *ast.RangeStmt: - return "nested context in loop" - case *ast.FuncLit: - return "nested context in function literal" - default: - return "unsupported nested context type" + return categoryInLoop } -} -func getBody(node ast.Node) (*ast.BlockStmt, error) { - forStmt, ok := node.(*ast.ForStmt) - if ok { - return forStmt.Body, nil + if isPointer(pass, assignStmt.Lhs[0]) { + return categoryInStructPointer } - rangeStmt, ok := node.(*ast.RangeStmt) - if ok { - return rangeStmt.Body, nil + switch node.(type) { + case *ast.FuncLit, *ast.FuncDecl: + return categoryInFuncLit + default: + return categoryUnsupported } +} - funcLit, ok := node.(*ast.FuncLit) - if ok { - return funcLit.Body, nil +func getBody(node ast.Node) (*ast.BlockStmt, error) { + switch typedNode := node.(type) { + case *ast.ForStmt: + return typedNode.Body, nil + case *ast.RangeStmt: + return typedNode.Body, nil + case *ast.FuncLit: + return typedNode.Body, nil + case *ast.FuncDecl: + return typedNode.Body, nil } return nil, errUnknown @@ -108,44 +160,29 @@ func getBody(node ast.Node) (*ast.BlockStmt, error) { func findNestedContext(pass *analysis.Pass, node ast.Node, stmts []ast.Stmt) *ast.AssignStmt { for _, stmt := range stmts { // Recurse if necessary - if inner, ok := stmt.(*ast.BlockStmt); ok { - found := findNestedContext(pass, node, inner.List) - if found != nil { + switch typedStmt := stmt.(type) { + case *ast.BlockStmt: + if found := findNestedContext(pass, node, typedStmt.List); found != nil { return found } - } - - if inner, ok := stmt.(*ast.IfStmt); ok { - found := findNestedContext(pass, node, inner.Body.List) - if found != nil { + case *ast.IfStmt: + if found := findNestedContext(pass, node, typedStmt.Body.List); found != nil { return found } - } - - if inner, ok := stmt.(*ast.SwitchStmt); ok { - found := findNestedContext(pass, node, inner.Body.List) - if found != nil { + case *ast.SwitchStmt: + if found := findNestedContext(pass, node, typedStmt.Body.List); found != nil { return found } - } - - if inner, ok := stmt.(*ast.CaseClause); ok { - found := findNestedContext(pass, node, inner.Body) - if found != nil { + case *ast.CaseClause: + if found := findNestedContext(pass, node, typedStmt.Body); found != nil { return found } - } - - if inner, 
ok := stmt.(*ast.SelectStmt); ok { - found := findNestedContext(pass, node, inner.Body.List) - if found != nil { + case *ast.SelectStmt: + if found := findNestedContext(pass, node, typedStmt.Body.List); found != nil { return found } - } - - if inner, ok := stmt.(*ast.CommClause); ok { - found := findNestedContext(pass, node, inner.Body) - if found != nil { + case *ast.CommClause: + if found := findNestedContext(pass, node, typedStmt.Body); found != nil { return found } } @@ -169,13 +206,18 @@ func findNestedContext(pass *analysis.Pass, node ast.Node, stmts []ast.Stmt) *as continue } + // Ignore [context.Background] & [context.TODO]. + if isContextFunction(assignStmt.Rhs[0], "Background", "TODO") { + continue + } + + if isPointer(pass, assignStmt.Lhs[0]) { + return assignStmt + } + // allow assignment to non-pointer children of values defined within the loop - if lhs := getRootIdent(pass, assignStmt.Lhs[0]); lhs != nil { - if obj := pass.TypesInfo.ObjectOf(lhs); obj != nil { - if checkObjectScopeWithinNode(obj.Parent(), node) { - continue // definition is within the loop - } - } + if isWithinLoop(assignStmt.Lhs[0], node, pass) { + continue } return assignStmt @@ -184,16 +226,51 @@ func findNestedContext(pass *analysis.Pass, node ast.Node, stmts []ast.Stmt) *as return nil } -func checkObjectScopeWithinNode(scope *types.Scope, node ast.Node) bool { - if scope == nil { +// render returns the pretty-print of the given node +func render(fset *token.FileSet, x interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := printer.Fprint(&buf, fset, x); err != nil { + return nil, fmt.Errorf("printing node: %w", err) + } + return buf.Bytes(), nil +} + +func isContextFunction(exp ast.Expr, fnName ...string) bool { + call, ok := exp.(*ast.CallExpr) + if !ok { return false } - if scope.Pos() >= node.Pos() && scope.End() <= node.End() { - return true + selector, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return false } - return false + ident, ok := selector.X.(*ast.Ident) + if !ok { + return false + } + + return ident.Name == "context" && slices.Contains(fnName, selector.Sel.Name) +} + +func isWithinLoop(exp ast.Expr, node ast.Node, pass *analysis.Pass) bool { + lhs := getRootIdent(pass, exp) + if lhs == nil { + return false + } + + obj := pass.TypesInfo.ObjectOf(lhs) + if obj == nil { + return false + } + + scope := obj.Parent() + if scope == nil { + return false + } + + return scope.Pos() >= node.Pos() && scope.End() <= node.End() } func getRootIdent(pass *analysis.Pass, node ast.Node) *ast.Ident { @@ -214,11 +291,12 @@ func getRootIdent(pass *analysis.Pass, node ast.Node) *ast.Ident { } } -// render returns the pretty-print of the given node -func render(fset *token.FileSet, x interface{}) ([]byte, error) { - var buf bytes.Buffer - if err := printer.Fprint(&buf, fset, x); err != nil { - return nil, fmt.Errorf("printing node: %w", err) +func isPointer(pass *analysis.Pass, exp ast.Node) bool { + switch n := exp.(type) { + case *ast.SelectorExpr: + sel, ok := pass.TypesInfo.Selections[n] + return ok && sel.Indirect() } - return buf.Bytes(), nil + + return false } diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go index ec75fd409..5be31eb68 100644 --- a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go @@ -150,7 +150,7 @@ func getCompositeLitRelatedComments(stack 
[]ast.Node, cm ast.CommentMap) []*ast. } func getStructType(pass *analysis.Pass, lit *ast.CompositeLit) (*types.Struct, *TypeInfo, bool) { - switch typ := pass.TypesInfo.TypeOf(lit).(type) { + switch typ := types.Unalias(pass.TypesInfo.TypeOf(lit)).(type) { case *types.Named: // named type if structTyp, ok := typ.Underlying().(*types.Struct); ok { pkg := typ.Obj().Pkg() diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md index 2ccfa22c5..0bf603b2b 100644 --- a/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md @@ -7,7 +7,7 @@ allow specific packages within a repository. ## Install ```bash -go install github.com/OpenPeeDeeP/depguard@latest +go install github.com/OpenPeeDeeP/depguard/cmd/depguard@latest ``` ## Config @@ -49,7 +49,7 @@ the linter's output. - `files` - list of file globs that will match this list of settings to compare against - `allow` - list of allowed packages - `deny` - map of packages that are not allowed where the value is a suggestion -= `listMode` - the mode to use for package matching +- `listMode` - the mode to use for package matching Files are matched using [Globs](https://github.com/gobwas/glob). If the files list is empty, then all files will match that list. Prefixing a file @@ -153,11 +153,29 @@ would be allowed. ```yaml Main: deny: - - github.com/OpenPeeDeeP/depguard$ + github.com/OpenPeeDeeP/depguard$: Please use v2 ``` -## Golangci-lint +## golangci-lint This linter was built with -[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible -and read their docs to see how to implement all their linters, including this one. +[golangci-lint](https://github.com/golangci/golangci-lint) in mind, read the [linters docs](https://golangci-lint.run/usage/linters/#depguard) to see how to configure all their linters, including this one. 
+ +The config is similar to the YAML depguard config documented above, however due to [golangci-lint limitation](https://github.com/golangci/golangci-lint/pull/4227) the `deny` value must be provided as a list, with `pkg` and `desc` keys (otherwise a [panic](https://github.com/OpenPeeDeeP/depguard/issues/74) may occur): + +```yaml +# golangci-lint config +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: lax # allow unless explicitely denied + files: + - $all + - "!$test" + allow: + - $gostd + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" +``` diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go index 2729091e8..af07b9bb6 100644 --- a/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go @@ -47,12 +47,12 @@ func (ua *UncompiledAnalyzer) Compile() error { return nil } -func (settings LinterSettings) run(pass *analysis.Pass) (interface{}, error) { - s, err := settings.compile() +func (s LinterSettings) run(pass *analysis.Pass) (interface{}, error) { + settings, err := s.compile() if err != nil { return nil, err } - return s.run(pass) + return settings.run(pass) } func newAnalyzer(run func(*analysis.Pass) (interface{}, error)) *analysis.Analyzer { diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go index 311cacc88..5bc74f8d0 100644 --- a/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go @@ -202,9 +202,9 @@ func (l LinterSettings) compile() (linterSettings, error) { return li, nil } -func (ls linterSettings) whichLists(fileName string) []*list { +func (s linterSettings) whichLists(fileName string) []*list { var matches []*list - for _, l := range ls { + for _, l := range s { if l.fileMatch(fileName) { matches = append(matches, l) } @@ -236,5 +236,13 @@ func strInPrefixList(str string, prefixList []string) (bool, int) { if ioc[len(ioc)-1] == '$' { return str == ioc[:len(ioc)-1], idx } - return strings.HasPrefix(str, prefixList[idx]), idx + + // There is no sep chars in ioc so it is a GOROOT import that is being matched to the import (str) (see $gostd expander) + // AND the import contains a period which GOROOT cannot have. 
This eliminates the go.evil.me/pkg scenario + // BUT should still allow /os/exec and ./os/exec imports which are very uncommon + if !strings.ContainsAny(ioc, "./") && strings.ContainsRune(str, '.') { + return false, idx + } + + return strings.HasPrefix(str, ioc), idx } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml b/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml new file mode 100644 index 000000000..758ae1a9e --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml @@ -0,0 +1,92 @@ +run: + tests: true + +output: + print-issued-lines: false + +linters: + enable-all: true + disable: + - cyclop + - depguard + - dupl + - dupword + - err113 + - errorlint + - exhaustive + - exhaustruct + - exportloopref + - forcetypeassert + - funlen + - gci + - gochecknoglobals + - gocognit + - goconst + - gocyclo + - godot + - godox + - gofumpt + - govet + - ireturn + - lll + - maintidx + - mnd + - mnd + - musttag + - nestif + - nilnil + - nlreturn + - nolintlint + - nonamedreturns + - paralleltest + - perfsprint + - predeclared + - revive + - stylecheck + - testableexamples + - testpackage + - thelper + - varnamelen + - wrapcheck + - wsl + +linters-settings: + govet: + enable: + - shadow + gocyclo: + min-complexity: 10 + dupl: + threshold: 100 + goconst: + min-len: 8 + min-occurrences: 3 + forbidigo: + exclude-godoc-examples: false + #forbid: + # - (Must)?NewLexer$ + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-use-default: false + exclude-dirs: + - _examples + exclude: + # Captured by errcheck. + - "^(G104|G204):" + # Very commonly not checked. + - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' + - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported' + - "composite literal uses unkeyed fields" + - 'declaration of "err" shadows declaration' + - "should not use dot imports" + - "Potential file inclusion via variable" + - "should have comment or be unexported" + - "comment on exported var .* should be of the form" + - "at least one file in a package should have a package comment" + - "string literal contains the Unicode" + - "methods on the same type should have the same receiver name" + - "_TokenType_name should be _TokenTypeName" + - "`_TokenType_map` should be `_TokenTypeMap`" + - "rewrite if-else to switch statement" diff --git a/vendor/github.com/alecthomas/go-check-sumtype/README.md b/vendor/github.com/alecthomas/go-check-sumtype/README.md index 2ccec4e84..287aa68b7 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/README.md +++ b/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -92,6 +92,12 @@ passing checks, set the `-default-signifies-exhasutive=false` flag. As a special case, if the type switch statement contains a `default` clause that always panics, then exhaustiveness checks are still performed. +By default, `go-check-sumtype` will not include shared interfaces in the exhaustiviness check. +This can be changed by setting the `-include-shared-interfaces=true` flag. +When this flag is set, `go-check-sumtype` will not require that all concrete structs +are listed in the switch statement, as long as the switch statement is exhaustive +with respect to interfaces the structs implement. + ## Details and motivation Sum types are otherwise known as discriminated unions. 
That is, a sum type is diff --git a/vendor/github.com/alecthomas/go-check-sumtype/check.go b/vendor/github.com/alecthomas/go-check-sumtype/check.go index 1a0a32517..ff7fec728 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/check.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/check.go @@ -29,7 +29,7 @@ func (e inexhaustiveError) Error() string { // Names returns a sorted list of names corresponding to the missing variant // cases. func (e inexhaustiveError) Names() []string { - var list []string + list := make([]string, 0, len(e.Missing)) for _, o := range e.Missing { list = append(list, o.Name()) } @@ -92,6 +92,10 @@ func missingVariantsInSwitch( ) (*sumTypeDef, []types.Object) { asserted := findTypeAssertExpr(swtch) ty := pkg.TypesInfo.TypeOf(asserted) + if ty == nil { + panic(fmt.Sprintf("no type found for asserted expression: %v", asserted)) + } + def := findDef(defs, ty) if def == nil { // We couldn't find a corresponding sum type, so there's @@ -103,11 +107,11 @@ func missingVariantsInSwitch( // A catch-all case defeats all exhaustiveness checks. return def, nil } - var variantTypes []types.Type + variantTypes := make([]types.Type, 0, len(variantExprs)) for _, expr := range variantExprs { variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) } - return def, def.missing(variantTypes) + return def, def.missing(variantTypes, config.IncludeSharedInterfaces) } // switchVariants returns all case expressions found in a type switch. This diff --git a/vendor/github.com/alecthomas/go-check-sumtype/config.go b/vendor/github.com/alecthomas/go-check-sumtype/config.go index 759176eb7..5c722b75c 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/config.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/config.go @@ -2,4 +2,7 @@ package gochecksumtype type Config struct { DefaultSignifiesExhaustive bool + // IncludeSharedInterfaces in the exhaustiviness check. If true, we do not need to list all concrete structs, as long + // as the switch statement is exhaustive with respect to interfaces the structs implement. + IncludeSharedInterfaces bool } diff --git a/vendor/github.com/alecthomas/go-check-sumtype/def.go b/vendor/github.com/alecthomas/go-check-sumtype/def.go index 24729ac01..71bdf2f72 100644 --- a/vendor/github.com/alecthomas/go-check-sumtype/def.go +++ b/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -71,7 +71,7 @@ type sumTypeDef struct { // sum type declarations. If no such sum type definition could be found for // any of the given declarations, then an error is returned. func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { - var defs []sumTypeDef + defs := make([]sumTypeDef, 0, len(decls)) var errs []error for _, decl := range decls { def, err := newSumTypeDef(decl.Package.Types, decl) @@ -104,7 +104,7 @@ func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { return nil, notInterfaceError{decl} } hasUnexported := false - for i := 0; i < iface.NumMethods(); i++ { + for i := range iface.NumMethods() { if !iface.Method(i).Exported() { hasUnexported = true break @@ -145,7 +145,7 @@ func (def *sumTypeDef) String() string { // missing returns a list of variants in this sum type that are not in the // given list of types. -func (def *sumTypeDef) missing(tys []types.Type) []types.Object { +func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) []types.Object { // TODO(ag): This is O(n^2). Fix that. 
/shrug var missing []types.Object for _, v := range def.Variants { @@ -155,15 +155,29 @@ func (def *sumTypeDef) missing(tys []types.Type) []types.Object { ty = indirect(ty) if types.Identical(varty, ty) { found = true + break + } + if includeSharedInterfaces && implements(varty, ty) { + found = true + break } } - if !found { + if !found && !isInterface(varty) { + // we do not include interfaces extending the sumtype, as the + // all implementations of those interfaces are already covered + // by the sumtype. missing = append(missing, v) } } return missing } +func isInterface(ty types.Type) bool { + underlying := indirect(ty).Underlying() + _, ok := underlying.(*types.Interface) + return ok +} + // indirect dereferences through an arbitrary number of pointer types. func indirect(ty types.Type) types.Type { if ty, ok := ty.(*types.Pointer); ok { @@ -171,3 +185,11 @@ func indirect(ty types.Type) types.Type { } return ty } + +func implements(varty, interfaceType types.Type) bool { + underlying := interfaceType.Underlying() + if interf, ok := underlying.(*types.Interface); ok { + return types.Implements(varty, interf) || types.Implements(types.NewPointer(varty), interf) + } + return false +} diff --git a/vendor/github.com/alingse/nilnesserr/.gitignore b/vendor/github.com/alingse/nilnesserr/.gitignore new file mode 100644 index 000000000..6f72f8926 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/.gitignore @@ -0,0 +1,25 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env diff --git a/vendor/github.com/alingse/nilnesserr/.golangci.yaml b/vendor/github.com/alingse/nilnesserr/.golangci.yaml new file mode 100644 index 000000000..1a2a270a6 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/.golangci.yaml @@ -0,0 +1,66 @@ +linters: + enable-all: true + disable: + - wsl + - varnamelen + - nilnil + - ireturn + - gochecknoglobals + - nolintlint + +linters-settings: + depguard: + rules: + main: + list-mode: lax + files: + - $all + allow: + - $gostd + - github.com/alingse/nilnesserr + +issues: + exclude-rules: + - path: internal/typeparams + linters: + - nonamedreturns + - nlreturn + - intrange + - mnd + - forcetypeassert + - exhaustruct + - exhaustive + - err113 + - gofumpt + - prealloc + - funclen + - gocritic + - funlen + - cyclop + - gocognit + + - path: nilness.go + linters: + - nonamedreturns + - nlreturn + - nilnil + - mnd + - forcetypeassert + - gochecknoglobals + - nestif + - funlen + - godox + - gocognit + - gofumpt + - exhaustive + - cyclop + - unparam + - gocyclo + + - text: "analysis." 
+ linters: + - exhaustruct + + - text: "newAnalyzer" + linters: + - unparam diff --git a/vendor/github.com/shazow/go-diff/LICENSE b/vendor/github.com/alingse/nilnesserr/LICENSE similarity index 94% rename from vendor/github.com/shazow/go-diff/LICENSE rename to vendor/github.com/alingse/nilnesserr/LICENSE index 85e1e4b33..6caf1ea1c 100644 --- a/vendor/github.com/shazow/go-diff/LICENSE +++ b/vendor/github.com/alingse/nilnesserr/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2015 Andrey Petrov +Copyright (c) 2024 alingse Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/alingse/nilnesserr/README.md b/vendor/github.com/alingse/nilnesserr/README.md new file mode 100644 index 000000000..6b199b6c5 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/README.md @@ -0,0 +1,74 @@ +# nilnesserr + +nilnesserr = nilness + nilerr + +`nilnesserr` is a linter for report return nil error in Go. It combines the features of [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) and [nilerr](https://github.com/gostaticanalysis/nilerr), providing a concise way to detect return an unrelated/nil-values error. + +## Case + +case 1 +```go +err := do() +if err != nil { + return err +} +err2 := do2() +if err2 != nil { + return err // which should return err2 after check `err2 != nil`, but return a nil value error +} +``` + + +## Some Real Bugs + +- https://github.com/alingse/sundrylint/issues/4 +- https://github.com/alingse/nilnesserr/issues/1 + +We use https://github.com/alingse/go-linter-runner to run linter on GitHub Actions for public Go repos + +## Install + +```bash +go install github.com/alingse/nilnesserr/cmd/nilnesserr@latest +``` + + +## TODO + +case 2 + +```go +err := do() +if err != nil { + return err +} +_, ok := do2() +if !ok { + return err +} + +``` + +case 3 + +```go +err := do() +if err != nil { + return err +} +_, ok := do2() +if !ok { + return errors.Wrap(err) +} +``` + +maybe this is also a bug, should return a non-nil value error after the if + +## License + +This project is licensed under the MIT License. See the LICENSE file for details. + +This project incorporates source code from two different libraries: + +1. [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) licensed under the BSD license. +2. [nilerr](https://github.com/gostaticanalysis/nilerr) licensed under the MIT license. diff --git a/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go b/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go new file mode 100644 index 000000000..7a744d123 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. 
+func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. 
+// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} diff --git a/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go b/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go new file mode 100644 index 000000000..0302872f4 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go @@ -0,0 +1,200 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" +) + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. 
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +var ErrNilType = errors.New("nil type") +var ErrUnreachable = errors.New("unreachable") + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + return nil, ErrNilType + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. 
+ tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + return nil, ErrUnreachable + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go b/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go new file mode 100644 index 000000000..cbd12f801 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. 
+ // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go b/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go new file mode 100644 index 000000000..35c66003d --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go @@ -0,0 +1,166 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. 
+func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/github.com/alingse/nilnesserr/linter.go b/vendor/github.com/alingse/nilnesserr/linter.go new file mode 100644 index 000000000..704e347ef --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/linter.go @@ -0,0 +1,48 @@ +package nilnesserr + +import ( + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" +) + +const ( + linterName = "nilnesserr" + linterDoc = `Reports constructs that checks for err != nil, but returns a different nil value error. 
+Powered by nilness and nilerr.` + + linterMessage = "return a nil value error after check error" +) + +type LinterSetting struct{} + +func NewAnalyzer(setting LinterSetting) (*analysis.Analyzer, error) { + a, err := newAnalyzer(setting) + if err != nil { + return nil, err + } + + return &analysis.Analyzer{ + Name: linterName, + Doc: linterDoc, + Run: a.run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, + }, nil +} + +type analyzer struct { + setting LinterSetting +} + +func newAnalyzer(setting LinterSetting) (*analyzer, error) { + a := &analyzer{setting: setting} + + return a, nil +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + _, _ = a.checkNilnesserr(pass) + + return nil, nil +} diff --git a/vendor/github.com/alingse/nilnesserr/nilerr.go b/vendor/github.com/alingse/nilnesserr/nilerr.go new file mode 100644 index 000000000..c05ec9003 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/nilerr.go @@ -0,0 +1,83 @@ +// some code was copy from https://github.com/gostaticanalysis/nilerr/blob/master/nilerr.go + +package nilnesserr + +import ( + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ssa" +) + +var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) // nolint: forcetypeassert + +func isErrType(res ssa.Value) bool { + return types.Implements(res.Type(), errType) +} + +func isConstNil(res ssa.Value) bool { + v, ok := res.(*ssa.Const) + if ok && v.IsNil() { + return true + } + + return false +} + +func extractCheckedErrorValue(binOp *ssa.BinOp) ssa.Value { + if isErrType(binOp.X) && isConstNil(binOp.Y) { + return binOp.X + } + if isErrType(binOp.Y) && isConstNil(binOp.X) { + return binOp.Y + } + + return nil +} + +type errFact fact + +func findLastNonnilValue(errors []errFact, res ssa.Value) ssa.Value { + if len(errors) == 0 { + return nil + } + + for j := len(errors) - 1; j >= 0; j-- { + last := errors[j] + if last.value == res { + return nil + } else if last.nilness == isnonnil { + return last.value + } + } + + return nil +} + +func checkNilnesserr(pass *analysis.Pass, b *ssa.BasicBlock, errors []errFact, isNilnees func(value ssa.Value) bool) { + for i := range b.Instrs { + instr, ok := b.Instrs[i].(*ssa.Return) + if !ok { + continue + } + + for _, res := range instr.Results { + if !isErrType(res) || isConstNil(res) || !isNilnees(res) { + continue + } + // check the lastValue error that is isnonnil + lastValue := findLastNonnilValue(errors, res) + if lastValue == nil { + continue + } + // report + pos := instr.Pos() + if pos.IsValid() { + pass.Report(analysis.Diagnostic{ + Pos: pos, + Message: linterMessage, + }) + } + } + } +} diff --git a/vendor/github.com/alingse/nilnesserr/nilness.go b/vendor/github.com/alingse/nilnesserr/nilness.go new file mode 100644 index 000000000..cd5a69107 --- /dev/null +++ b/vendor/github.com/alingse/nilnesserr/nilness.go @@ -0,0 +1,374 @@ +// This file was copy from https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/nilness/nilness.go +// I modified some to check the error return + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package nilnesserr + +import ( + "go/token" + "go/types" + + "github.com/alingse/nilnesserr/internal/typeparams" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" +) + +func (a *analyzer) checkNilnesserr(pass *analysis.Pass) (interface{}, error) { + ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + for _, fn := range ssainput.SrcFuncs { + runFunc(pass, fn) + } + return nil, nil +} + +func runFunc(pass *analysis.Pass, fn *ssa.Function) { + // visit visits reachable blocks of the CFG in dominance order, + // maintaining a stack of dominating nilness facts. + // + // By traversing the dom tree, we can pop facts off the stack as + // soon as we've visited a subtree. Had we traversed the CFG, + // we would need to retain the set of facts for each block. + seen := make([]bool, len(fn.Blocks)) // seen[i] means visit should ignore block i + + var visit func(b *ssa.BasicBlock, stack []fact, errors []errFact) + + visit = func(b *ssa.BasicBlock, stack []fact, errors []errFact) { + if seen[b.Index] { + return + } + seen[b.Index] = true + + // check this block return a nil value error + checkNilnesserr( + pass, b, + errors, + func(v ssa.Value) bool { + return nilnessOf(stack, v) == isnil + }) + + // For nil comparison blocks, report an error if the condition + // is degenerate, and push a nilness fact on the stack when + // visiting its true and false successor blocks. + if binop, tsucc, fsucc := eq(b); binop != nil { + // extract the err != nil or err == nil + errValue := extractCheckedErrorValue(binop) + + xnil := nilnessOf(stack, binop.X) + ynil := nilnessOf(stack, binop.Y) + + if ynil != unknown && xnil != unknown && (xnil == isnil || ynil == isnil) { + // Degenerate condition: + // the nilness of both operands is known, + // and at least one of them is nil. + + // If tsucc's or fsucc's sole incoming edge is impossible, + // it is unreachable. Prune traversal of it and + // all the blocks it dominates. + // (We could be more precise with full dataflow + // analysis of control-flow joins.) + var skip *ssa.BasicBlock + if xnil == ynil { + skip = fsucc + } else { + skip = tsucc + } + for _, d := range b.Dominees() { + if d == skip && len(d.Preds) == 1 { + continue + } + + visit(d, stack, errors) + } + + return + } + + // "if x == nil" or "if nil == y" condition; x, y are unknown. + if xnil == isnil || ynil == isnil { + var newFacts facts + if xnil == isnil { + // x is nil, y is unknown: + // t successor learns y is nil. + newFacts = expandFacts(fact{binop.Y, isnil}) + } else { + // y is nil, x is unknown: + // t successor learns x is nil. + newFacts = expandFacts(fact{binop.X, isnil}) + } + + for _, d := range b.Dominees() { + // Successor blocks learn a fact + // only at non-critical edges. + // (We could do be more precise with full dataflow + // analysis of control-flow joins.) + s := stack + errs := errors + if len(d.Preds) == 1 { + if d == tsucc { + s = append(s, newFacts...) + // add nil error + if errValue != nil { + errs = append(errs, errFact{value: errValue, nilness: isnil}) + } + } else if d == fsucc { + s = append(s, newFacts.negate()...) + // add non-nil error + if errValue != nil { + errs = append(errs, errFact{value: errValue, nilness: isnonnil}) + } + } + } + + visit(d, s, errs) + } + return + } + } + + // In code of the form: + // + // if ptr, ok := x.(*T); ok { ... } else { fsucc } + // + // the fsucc block learns that ptr == nil, + // since that's its zero value. 
+ if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + // Handle "if ok" and "if !ok" variants. + cond, fsucc := If.Cond, b.Succs[1] + if unop, ok := cond.(*ssa.UnOp); ok && unop.Op == token.NOT { + cond, fsucc = unop.X, b.Succs[0] + } + + // Match pattern: + // t0 = typeassert (pointerlike) + // t1 = extract t0 #0 // ptr + // t2 = extract t0 #1 // ok + // if t2 goto tsucc, fsucc + if extract1, ok := cond.(*ssa.Extract); ok && extract1.Index == 1 { + if assert, ok := extract1.Tuple.(*ssa.TypeAssert); ok && + isNillable(assert.AssertedType) { + for _, pinstr := range *assert.Referrers() { + if extract0, ok := pinstr.(*ssa.Extract); ok && + extract0.Index == 0 && + extract0.Tuple == extract1.Tuple { + for _, d := range b.Dominees() { + if len(d.Preds) == 1 && d == fsucc { + visit(d, append(stack, fact{extract0, isnil}), errors) + } + } + } + } + } + } + } + + for _, d := range b.Dominees() { + visit(d, stack, errors) + } + } + + // Visit the entry block. No need to visit fn.Recover. + if fn.Blocks != nil { + visit(fn.Blocks[0], make([]fact, 0, 20), nil) // 20 is plenty + } +} + +// A fact records that a block is dominated +// by the condition v == nil or v != nil. +type fact struct { + value ssa.Value + nilness nilness +} + +func (f fact) negate() fact { return fact{f.value, -f.nilness} } + +type nilness int + +const ( + isnonnil = -1 + unknown nilness = 0 + isnil = 1 +) + +var nilnessStrings = []string{"non-nil", "unknown", "nil"} + +func (n nilness) String() string { return nilnessStrings[n+1] } + +// nilnessOf reports whether v is definitely nil, definitely not nil, +// or unknown given the dominating stack of facts. +func nilnessOf(stack []fact, v ssa.Value) nilness { + switch v := v.(type) { + // unwrap ChangeInterface and Slice values recursively, to detect if underlying + // values have any facts recorded or are otherwise known with regard to nilness. + // + // This work must be in addition to expanding facts about + // ChangeInterfaces during inference/fact gathering because this covers + // cases where the nilness of a value is intrinsic, rather than based + // on inferred facts, such as a zero value interface variable. That + // said, this work alone would only inform us when facts are about + // underlying values, rather than outer values, when the analysis is + // transitive in both directions. + case *ssa.ChangeInterface: + if underlying := nilnessOf(stack, v.X); underlying != unknown { + return underlying + } + case *ssa.MakeInterface: + // A MakeInterface is non-nil unless its operand is a type parameter. + tparam, ok := types.Unalias(v.X.Type()).(*types.TypeParam) + if !ok { + return isnonnil + } + + // A MakeInterface of a type parameter is non-nil if + // the type parameter cannot be instantiated as an + // interface type (#66835). + if terms, err := typeparams.NormalTerms(tparam.Constraint()); err == nil && len(terms) > 0 { + return isnonnil + } + + // If the type parameter can be instantiated as an + // interface (and thus also as a concrete type), + // we can't determine the nilness. + + case *ssa.Slice: + if underlying := nilnessOf(stack, v.X); underlying != unknown { + return underlying + } + case *ssa.SliceToArrayPointer: + nn := nilnessOf(stack, v.X) + if slice2ArrayPtrLen(v) > 0 { + if nn == isnil { + // We know that *(*[1]byte)(nil) is going to panic because of the + // conversion. So return unknown to the caller, prevent useless + // nil deference reporting due to * operator. 
+ return unknown + } + // Otherwise, the conversion will yield a non-nil pointer to array. + // Note that the instruction can still panic if array length greater + // than slice length. If the value is used by another instruction, + // that instruction can assume the panic did not happen when that + // instruction is reached. + return isnonnil + } + // In case array length is zero, the conversion result depends on nilness of the slice. + if nn != unknown { + return nn + } + } + + // Is value intrinsically nil or non-nil? + switch v := v.(type) { + case *ssa.Alloc, + *ssa.FieldAddr, + *ssa.FreeVar, + *ssa.Function, + *ssa.Global, + *ssa.IndexAddr, + *ssa.MakeChan, + *ssa.MakeClosure, + *ssa.MakeMap, + *ssa.MakeSlice: + return isnonnil + + case *ssa.Const: + if v.IsNil() { + return isnil // nil or zero value of a pointer-like type + } else { + return unknown // non-pointer + } + } + + // Search dominating control-flow facts. + for _, f := range stack { + if f.value == v { + return f.nilness + } + } + return unknown +} + +func slice2ArrayPtrLen(v *ssa.SliceToArrayPointer) int64 { + return v.Type().(*types.Pointer).Elem().Underlying().(*types.Array).Len() +} + +// If b ends with an equality comparison, eq returns the operation and +// its true (equal) and false (not equal) successors. +func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) { + if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + if binop, ok := If.Cond.(*ssa.BinOp); ok { + switch binop.Op { + case token.EQL: + return binop, b.Succs[0], b.Succs[1] + case token.NEQ: + return binop, b.Succs[1], b.Succs[0] + } + } + } + return nil, nil, nil +} + +// expandFacts takes a single fact and returns the set of facts that can be +// known about it or any of its related values. Some operations, like +// ChangeInterface, have transitive nilness, such that if you know the +// underlying value is nil, you also know the value itself is nil, and vice +// versa. This operation allows callers to match on any of the related values +// in analyses, rather than just the one form of the value that happened to +// appear in a comparison. +// +// This work must be in addition to unwrapping values within nilnessOf because +// while this work helps give facts about transitively known values based on +// inferred facts, the recursive check within nilnessOf covers cases where +// nilness facts are intrinsic to the underlying value, such as a zero value +// interface variables. +// +// ChangeInterface is the only expansion currently supported, but others, like +// Slice, could be added. At this time, this tool does not check slice +// operations in a way this expansion could help. See +// https://play.golang.org/p/mGqXEp7w4fR for an example. +func expandFacts(f fact) []fact { + ff := []fact{f} + +Loop: + for { + switch v := f.value.(type) { + case *ssa.ChangeInterface: + f = fact{v.X, f.nilness} + ff = append(ff, f) + default: + break Loop + } + } + + return ff +} + +type facts []fact + +func (ff facts) negate() facts { + nn := make([]fact, len(ff)) + for i, f := range ff { + nn[i] = f.negate() + } + return nn +} + +func isNillable(t types.Type) bool { + // TODO(adonovan): CoreType (+ case *Interface) looks wrong. + // This should probably use Underlying, and handle TypeParam + // by computing the union across its normal terms. 
+ switch t := typeparams.CoreType(t).(type) { + case *types.Pointer, + *types.Map, + *types.Signature, + *types.Chan, + *types.Interface, + *types.Slice: + return true + case *types.Basic: + return t == types.Typ[types.UnsafePointer] + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b2..3219517da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -6,6 +6,7 @@ import ( smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // HTTPClient provides the interface to provide custom HTTPClients. Generally @@ -192,6 +193,17 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation + + // Registry of HTTP interceptors. + Interceptors smithyhttp.InterceptorRegistry + + // Priority list of preferred auth scheme IDs. + AuthSchemePreference []string + + // ServiceOptions provides service specific configuration options that will be applied + // when constructing clients for specific services. Each callback function receives the service ID + // and the service's Options struct, allowing for dynamic configuration based on the service. + ServiceOptions []func(string, any) } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index d0f3094bc..b72921f87 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.5" +const goModuleVersion = "1.38.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 52d59b04b..5549922ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -260,7 +260,7 @@ func (r *Attempt) handleAttempt( // Get a retry token that will be released after the releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) if retryTokenErr != nil { - return out, attemptResult, nopRelease, retryTokenErr + return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr) } //------------------------------ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index a9d68c515..e0ebf3903 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,37 @@ +# v1.4.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.36 (2025-06-17) * **Dependency Update**: Update to smithy-go v1.22.4. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index dfc815100..3479c11c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.36" +const goModuleVersion = "1.4.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 5f0779997..d4e6611f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -35,6 +35,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-northeast-1": { Name: nil, DnsSuffix: nil, @@ -98,6 +105,27 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-5": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-6": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-7": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -196,6 +224,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "mx-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "sa-east-1": { Name: nil, DnsSuffix: nil, @@ -269,32 +304,18 @@ var partitions = []Partition{ }, }, { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, SupportsDualStack: true, - ImplicitGlobalRegion: 
"us-gov-west-1", + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { + "eusc-de-east-1": { Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -309,7 +330,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-iso-east-1", @@ -344,7 +365,7 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isob-east-1", @@ -372,12 +393,19 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides{ + "aws-iso-e-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-isoe-west-1": { Name: nil, DnsSuffix: nil, @@ -393,11 +421,68 @@ var partitions = []Partition{ DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, SupportsDualStack: false, ImplicitGlobalRegion: "us-isof-south-1", }, - Regions: map[string]RegionOverrides{}, + Regions: map[string]RegionOverrides{ + "aws-iso-f-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 456b07fca..c6582c9c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -50,11 +50,14 @@ "ap-southeast-5" : { "description" : "Asia Pacific 
(Malaysia)" }, + "ap-southeast-6" : { + "description" : "Asia Pacific (New Zealand)" + }, "ap-southeast-7" : { "description" : "Asia Pacific (Thailand)" }, "aws-global" : { - "description" : "AWS Standard global region" + "description" : "aws global region" }, "ca-central-1" : { "description" : "Canada (Central)" @@ -127,7 +130,7 @@ "regionRegex" : "^cn\\-\\w+\\-\\d+$", "regions" : { "aws-cn-global" : { - "description" : "AWS China global region" + "description" : "aws-cn global region" }, "cn-north-1" : { "description" : "China (Beijing)" @@ -137,32 +140,26 @@ } } }, { - "id" : "aws-us-gov", + "id" : "aws-eusc", "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "api.amazonwebservices.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" + "eusc-de-east-1" : { + "description" : "EU (Germany)" } } }, { "id" : "aws-iso", "outputs" : { "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "api.aws.ic.gov", "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", "supportsDualStack" : false, @@ -171,7 +168,7 @@ "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", "regions" : { "aws-iso-global" : { - "description" : "AWS ISO (US) global region" + "description" : "aws-iso global region" }, "us-iso-east-1" : { "description" : "US ISO East" @@ -184,7 +181,7 @@ "id" : "aws-iso-b", "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "api.aws.scloud", "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", "supportsDualStack" : false, @@ -193,7 +190,7 @@ "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", "regions" : { "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" + "description" : "aws-iso-b global region" }, "us-isob-east-1" : { "description" : "US ISOB East (Ohio)" @@ -203,7 +200,7 @@ "id" : "aws-iso-e", "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk", "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", "supportsDualStack" : false, @@ -212,7 +209,7 @@ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { "aws-iso-e-global" : { - "description" : "AWS ISOE (Europe) global region" + "description" : "aws-iso-e global region" }, "eu-isoe-west-1" : { "description" : "EU ISOE West" @@ -222,7 +219,7 @@ "id" : "aws-iso-f", "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "api.aws.hci.ic.gov", "implicitGlobalRegion" : "us-isof-south-1", "name" : "aws-iso-f", "supportsDualStack" : false, @@ -231,7 +228,7 @@ "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", "regions" : { "aws-iso-f-global" : { - "description" : "AWS ISOF global region" + "description" : "aws-iso-f global region" }, "us-isof-east-1" : { "description" : "US ISOF EAST" @@ -241,19 +238,25 @@ } } }, { - "id" : "aws-eusc", + "id" : "aws-us-gov", "outputs" : { - "dnsSuffix" : "amazonaws.eu", - "dualStackDnsSuffix" : 
"amazonaws.eu", - "implicitGlobalRegion" : "eusc-de-east-1", - "name" : "aws-eusc", - "supportsDualStack" : false, + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", "regions" : { - "eusc-de-east-1" : { - "description" : "EU (Germany)" + "aws-us-gov-global" : { + "description" : "aws-us-gov global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" } } } ], diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 01dc55c87..7ccb39033 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,37 @@ +# v2.7.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.7.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.36 (2025-06-17) * **Dependency Update**: Update to smithy-go v1.22.4. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 44c39bc0a..2d36cac95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.36" +const goModuleVersion = "2.7.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md index 8bad01b37..4ad41549a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md @@ -1,3 +1,163 @@ +# v1.249.0 (2025-08-29) + +* **Feature**: Release shows new route types such as filtered and advertisement. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.248.0 (2025-08-28) + +* **Feature**: This release adds support for copying Amazon EBS snapshot and AMIs to and from Local Zones. + +# v1.247.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.247.0 (2025-08-26) + +* **Feature**: Add new APIs for viewing how your shared AMIs are used by other accounts, and identify resources in your account that are dependent on particular AMIs + +# v1.246.0 (2025-08-25) + +* **Feature**: Added IPv6 support for AWS Client VPN. + +# v1.245.2 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.245.1 (2025-08-20) + +* **Bug Fix**: Remove unused deserialization code. + +# v1.245.0 (2025-08-19) + +* **Feature**: Add support for "warning" volume status. + +# v1.244.0 (2025-08-14) + +* **Feature**: This release adds ModifyInstanceConnectEndpoint API to update configurations on existing EC2 Instance Connect Endpoints and improves IPv6 support through dualstack DNS names for EC2 Instance Connect Endpoints. + +# v1.243.0 (2025-08-12) + +* **Feature**: Release to allow route table association with a PublicIpv4Pool. + +# v1.242.0 (2025-08-11) + +* **Feature**: Add support for configuring per-service Options via callback on global config. +* **Feature**: This release adds AvailabilityZoneId support for CreateVolume, DescribeVolume, LaunchTemplates, RunInstances, DescribeInstances, CreateDefaultSubnet, SpotInstances, and CreateDefaultSubnet APIs. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.241.0 (2025-08-06) + +* **Feature**: Mark Elastic Inference Accelerators and Elastic Graphics Processor parameters as deprecated on the RunInstances and LaunchTemplate APIs. + +# v1.240.0 (2025-08-04) + +* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.239.0 (2025-07-31) + +* **Feature**: Added support for the force option for the EC2 instance terminate command. This feature enables customers to recover resources associated with an instance stuck in the shutting-down state as a result of rare issues caused by a frozen operating system or an underlying hardware problem. + +# v1.238.0 (2025-07-30) + +* **Feature**: Release to show the next hop IP address for routes propagated by VPC Route Server into VPC route tables. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.237.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.236.0 (2025-07-25) + +* **Feature**: Transit Gateway native integration with AWS Network Firewall. Adding new enum value for the new Transit Gateway Attachment type. + +# v1.235.0 (2025-07-23) + +* **Feature**: Added support for skip-os-shutdown option for the EC2 instance stop and terminate operations. This feature enables customers to bypass the graceful OS shutdown, supporting faster state transitions when instance data preservation isn't critical. 
+ +# v1.234.0 (2025-07-21) + +* **Feature**: This release adds support for C8gn, F2 and P6e-GB200 Instance types + +# v1.233.1 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.233.0 (2025-07-17) + +* **Feature**: AWS Free Tier Version2 Support + +# v1.232.0 (2025-07-15) + +* **Feature**: This release adds support for volume initialization status, which enables you to monitor when the initialization process for an EBS volume is completed. This release also adds IPv6 support to EC2 Instance Connect Endpoints, allowing you to connect to your EC2 Instance via a private IPv6 address. + +# v1.231.0 (2025-07-09) + +* **Feature**: Adds support to Capacity Blocks for ML for purchasing EC2 P6e-GB200 UltraServers. Customers can now purchase u-p6e-gb200x72 and u-p6e-gb200x36 UltraServers. Adds new DescribeCapacityBlocks andDescribeCapacityBlockStatus APIs. Adds support for CapacityBlockId to DescribeInstanceTopology. + +# v1.230.0 (2025-07-03) + +* **Feature**: This release adds GroupOwnerId as a response member to the DescribeSecurityGroupVpcAssociations API and also adds waiters for SecurityGroupVpcAssociations (SecurityGroupVpcAssociationAssociated and SecurityGroupVpcAssociationDisassociated). + +# v1.229.0 (2025-07-02) + +* **Feature**: AWS Site-to-Site VPN now supports IPv6 addresses on outer tunnel IPs, making it easier for customers to build or transition to IPv6-only networks. + +# v1.228.0 (2025-07-01) + +* **Feature**: Add Context to GetInstanceTypesFromInstanceRequirements API + +# v1.227.0 (2025-06-26) + +* **Feature**: This release adds support for OdbNetworkArn as a target in VPC Route Tables + +# v1.226.0 (2025-06-24) + +* **Feature**: This release allows you to create and register AMIs while maintaining their underlying EBS snapshots within Local Zones. + +# v1.225.2 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.225.1 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.225.0 (2025-06-09) + +* **Feature**: Release to support Elastic VMware Service (Amazon EVS) Subnet and Amazon EVS Network Interface Types. + +# v1.224.1 (2025-06-06) + +* No change notes available for this release. + +# v1.224.0 (2025-05-28) + +* **Feature**: Enable the option to automatically delete underlying Amazon EBS snapshots when deregistering Amazon Machine Images (AMIs) + +# v1.223.0 (2025-05-27) + +* **Feature**: This release adds three features - option to store AWS Site-to-Site VPN pre-shared keys in AWS Secrets Manager, GetActiveVpnTunnelStatus API to check the in-use VPN algorithms, and SampleType option in GetVpnConnectionDeviceSampleConfiguration API to get recommended sample configs for VPN devices. + +# v1.222.0 (2025-05-23) + +* **Feature**: This release adds support for the C7i-flex, M7i-flex, I7i, I7ie, I8g, P6-b200, Trn2, C8gd, M8gd and R8gd instances + +# v1.221.0 (2025-05-21) + +* **Feature**: Release of Dualstack and Ipv6-only EC2 Public DNS hostnames + +# v1.220.0 (2025-05-20) + +* **Feature**: This release expands the ModifyInstanceMaintenanceOptions API to enable or disable instance migration during customer-initiated reboots for EC2 Scheduled Reboot Events. + +# v1.219.0 (2025-05-19) + +* **Feature**: This release includes new APIs for System Integrity Protection (SIP) configuration and automated root volume ownership delegation for EC2 Mac instances. 
+ # v1.218.0 (2025-05-12) * **Feature**: EC2 - Adding support for AvailabilityZoneId diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go index 2a7294485..31695be56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go @@ -427,24 +427,33 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AuthSchemePreference: cfg.AuthSchemePreference, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) resolveAWSRetryMode(cfg, &opts) resolveAWSEndpointResolver(cfg, &opts) + resolveInterceptors(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) + return New(opts, func(o *Options) { + for _, opt := range cfg.ServiceOptions { + opt(ServiceID, o) + } + for _, opt := range optFns { + opt(o) + } + }) } func resolveHTTPClient(o *Options) { @@ -558,6 +567,10 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) } +func resolveInterceptors(cfg aws.Config, o *Options) { + o.Interceptors = cfg.Interceptors.Copy() +} + func addClientUserAgent(stack *middleware.Stack, options Options) error { ua, err := getOrAddRequestUserAgent(stack) if err != nil { @@ -1024,6 +1037,69 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { }, "ResolveEndpointV2", middleware.After) } +func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{ + Interceptors: opts.Interceptors.BeforeRetryLoop, + }, "Retry", middleware.Before) +} + +func addInterceptAttempt(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{ + BeforeAttempt: opts.Interceptors.BeforeAttempt, + AfterAttempt: opts.Interceptors.AfterAttempt, + }, "Retry", middleware.After) +} + +func addInterceptExecution(stack *middleware.Stack, opts Options) error { + return stack.Initialize.Add(&smithyhttp.InterceptExecution{ + BeforeExecution: opts.Interceptors.BeforeExecution, + AfterExecution: opts.Interceptors.AfterExecution, + }, middleware.Before) +} + +func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error { + return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{ + Interceptors: opts.Interceptors.BeforeSerialization, + }, "OperationSerializer", middleware.Before) +} + +func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error { + return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{ + Interceptors: opts.Interceptors.AfterSerialization, + }, 
"OperationSerializer", middleware.After) +} + +func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{ + Interceptors: opts.Interceptors.BeforeSigning, + }, "Signing", middleware.Before) +} + +func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error { + return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{ + Interceptors: opts.Interceptors.AfterSigning, + }, "Signing", middleware.After) +} + +func addInterceptTransmit(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{ + BeforeTransmit: opts.Interceptors.BeforeTransmit, + AfterTransmit: opts.Interceptors.AfterTransmit, + }, middleware.After) +} + +func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{ + Interceptors: opts.Interceptors.BeforeDeserialization, + }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse) +} + +func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error { + return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{ + Interceptors: opts.Interceptors.AfterDeserialization, + }, "OperationDeserializer", middleware.Before) +} + type spanInitializeStart struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go index 4652a6b0c..3504b3de3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go @@ -151,6 +151,36 @@ func (c *Client) addOperationAcceptAddressTransferMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go index e6f0c2a7b..a8492062f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go @@ -144,6 +144,36 @@ func (c *Client) addOperationAcceptCapacityReservationBillingOwnershipMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go index 8827cfe50..ad47fe2ad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go @@ -149,6 +149,36 @@ func (c *Client) addOperationAcceptReservedInstancesExchangeQuoteMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go index ddb8e9347..3e24a0a60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go @@ -143,6 +143,36 @@ func (c *Client) addOperationAcceptTransitGatewayMulticastDomainAssociationsMidd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go index 5ea0d0147..5a0e09f1d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go @@ -143,6 +143,36 @@ func (c *Client) addOperationAcceptTransitGatewayPeeringAttachmentMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go index 6d8077c5a..cd6acdf59 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go @@ -145,6 +145,36 @@ func (c *Client) addOperationAcceptTransitGatewayVpcAttachmentMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go index 52327eb0e..b325b8d09 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go @@ -147,6 +147,36 @@ func (c *Client) addOperationAcceptVpcEndpointConnectionsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go index 4ca7541cb..af4ae07a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go @@ -148,6 +148,36 @@ func (c *Client) addOperationAcceptVpcPeeringConnectionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go index 21a166c98..2ac181089 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go @@ -25,8 +25,6 @@ import ( // // It can take a few minutes before traffic to the specified addresses starts // routing to Amazon Web Services because of BGP propagation delays. -// -// To stop advertising the BYOIP CIDR, use WithdrawByoipCidr. 
func (c *Client) AdvertiseByoipCidr(ctx context.Context, params *AdvertiseByoipCidrInput, optFns ...func(*Options)) (*AdvertiseByoipCidrOutput, error) { if params == nil { params = &AdvertiseByoipCidrInput{} @@ -181,6 +179,36 @@ func (c *Client) addOperationAdvertiseByoipCidrMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go index 2e5b63915..58899ccff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go @@ -211,6 +211,36 @@ func (c *Client) addOperationAllocateAddressMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go index b8385eb3f..a225d52a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go @@ -218,6 +218,36 @@ func (c *Client) addOperationAllocateHostsMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go index 933d56278..2e6b412fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go @@ -202,6 +202,36 @@ func (c *Client) addOperationAllocateIpamPoolCidrMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go index 93999d67a..ab70e2179 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go @@ -154,6 +154,36 @@ func (c *Client) addOperationApplySecurityGroupsToClientVpnTargetNetworkMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if 
err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go index 6323f310e..5abacb18c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go @@ -176,6 +176,36 @@ func (c *Client) addOperationAssignIpv6AddressesMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go index 72df0d701..13476d9f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go @@ -193,6 +193,36 @@ func (c *Client) addOperationAssignPrivateIpAddressesMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go index 08d17ad10..828f0ff4a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go @@ -155,6 +155,36 @@ 
func (c *Client) addOperationAssignPrivateNatGatewayAddressMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go index cd8bcb78b..f82030729 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go @@ -182,6 +182,36 @@ func (c *Client) addOperationAssociateAddressMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go index d4145916e..4cef3f904 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go @@ -150,6 +150,36 @@ func (c *Client) addOperationAssociateCapacityReservationBillingOwnerMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go index 3edf2cca0..4c94c99ad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go @@ -168,6 +168,36 @@ func (c *Client) addOperationAssociateClientVpnTargetNetworkMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go index 1b04c25a9..d8cacfba1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go @@ -154,6 +154,36 @@ func (c *Client) addOperationAssociateDhcpOptionsMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { 
return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go index c3e3e1ada..751c28255 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go @@ -173,6 +173,36 @@ func (c *Client) addOperationAssociateEnclaveCertificateIamRoleMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go index 885ccab24..c931e95f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go @@ -142,6 +142,36 @@ func (c *Client) addOperationAssociateIamInstanceProfileMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go index 9fb87f544..b86902697 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go @@ -153,6 +153,36 @@ func (c *Client) addOperationAssociateInstanceEventWindowMiddlewares(stack 
*midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go index afe7a72e1..d98e02fb3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go @@ -156,6 +156,36 @@ func (c *Client) addOperationAssociateIpamByoasnMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go index 24d328e46..af3cd404a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go @@ -159,6 +159,36 @@ func (c *Client) addOperationAssociateIpamResourceDiscoveryMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go index 3c87b06be..170dca9b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go @@ -171,6 +171,36 @@ func (c *Client) addOperationAssociateNatGatewayAddressMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteServer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteServer.go index d12c44aab..1bff1b551 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteServer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteServer.go @@ -154,6 +154,36 @@ func (c *Client) addOperationAssociateRouteServerMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go
index a4c5941c9..f6f9bd2b5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go
@@ -52,6 +52,10 @@ type AssociateRouteTableInput struct {
 	// The ID of the internet gateway or virtual private gateway.
 	GatewayId *string
 
+	// The ID of a public IPv4 pool. A public IPv4 pool is a pool of IPv4 addresses
+	// that you've brought to Amazon Web Services with BYOIP.
+	PublicIpv4Pool *string
+
 	// The ID of the subnet.
 	SubnetId *string
 
@@ -161,6 +165,36 @@ func (c *Client) addOperationAssociateRouteTableMiddlewares(stack *middleware.St
 	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
 		return err
 	}
+	if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAttempt(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptExecution(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeSerialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterSerialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeSigning(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterSigning(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptTransmit(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+		return err
+	}
+	if err = addInterceptAfterDeserialization(stack, options); err != nil {
+		return err
+	}
 	if err = addSpanInitializeStart(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go
index 7287e7a67..69fc31c27 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go
@@ -21,7 +21,7 @@ import (
 //   - You can associate the security group with another VPC if your account owns
 //     the VPC or if the VPC was shared with you.
 //
-//   - You must own the security group and the VPC that it was created in.
+//   - You must own the security group.
 //
 //   - You cannot use this feature with default security groups.
// @@ -161,6 +161,36 @@ func (c *Client) addOperationAssociateSecurityGroupVpcMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go index f14b485fa..e1bac07dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go @@ -149,6 +149,36 @@ func (c *Client) addOperationAssociateSubnetCidrBlockMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go index 50f8329f9..cc7b2a3cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go @@ -159,6 +159,36 @@ func (c *Client) addOperationAssociateTransitGatewayMulticastDomainMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } 
+ if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go index 1629c934e..457e401f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go @@ -150,6 +150,36 @@ func (c *Client) addOperationAssociateTransitGatewayPolicyTableMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go index e90872100..e8b45d258 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go @@ -148,6 +148,36 @@ func (c *Client) addOperationAssociateTransitGatewayRouteTableMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != 
nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go index cd0feeebb..9656d084c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go @@ -175,6 +175,36 @@ func (c *Client) addOperationAssociateTrunkInterfaceMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go index fe239aac7..3f3c53d3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go @@ -206,6 +206,36 @@ func (c *Client) addOperationAssociateVpcCidrBlockMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go index 8ca899fc0..f75057a6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go @@ -165,6 +165,36 @@ func (c *Client) addOperationAttachClassicLinkVpcMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go index e6ff7ecb2..7c24930b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go @@ -146,6 +146,36 @@ func (c *Client) addOperationAttachInternetGatewayMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go index 6f8f18a70..6f6ed3702 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go @@ -169,6 +169,36 @@ func (c *Client) addOperationAttachNetworkInterfaceMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go index 9e32f0ce6..f25ad3208 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go @@ -160,6 +160,36 @@ func (c *Client) addOperationAttachVerifiedAccessTrustProviderMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go index 4cea55b0a..31d6f972d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go @@ -12,13 +12,15 @@ import ( "time" ) -// Attaches an EBS volume to a running or stopped instance and exposes it to the -// instance with the specified device name. +// Attaches an Amazon EBS volume to a running or stopped instance, and exposes it +// to the instance with the specified device name. // -// Encrypted EBS volumes must be attached to instances that support Amazon EBS -// encryption. For more information, see [Amazon EBS encryption]in the Amazon EBS User Guide. +// The maximum number of Amazon EBS volumes that you can attach to an instance +// depends on the instance type. If you exceed the volume attachment limit for an +// instance type, the attachment request fails with the AttachmentLimitExceeded +// error. For more information, see [Instance volume limits]. // -// After you attach an EBS volume, you must make it available. For more +// After you attach an EBS volume, you must make it available for use. For more // information, see [Make an EBS volume available for use]. // // If a volume has an Amazon Web Services Marketplace product code: @@ -36,9 +38,9 @@ import ( // // For more information, see [Attach an Amazon EBS volume to an instance] in the Amazon EBS User Guide. // -// [Amazon EBS encryption]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption.html // [Make an EBS volume available for use]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-using-volumes.html // [Attach an Amazon EBS volume to an instance]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-attaching-volume.html +// [Instance volume limits]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html func (c *Client) AttachVolume(ctx context.Context, params *AttachVolumeInput, optFns ...func(*Options)) (*AttachVolumeOutput, error) { if params == nil { params = &AttachVolumeInput{} @@ -84,7 +86,8 @@ type AttachVolumeInput struct { // Describes volume attachment details. type AttachVolumeOutput struct { - // The ARN of the Amazon ECS or Fargate task to which the volume is attached. + // The ARN of the Amazon Web Services-managed resource to which the volume is + // attached. AssociatedResource *string // The time stamp when the attachment initiated. @@ -95,18 +98,21 @@ type AttachVolumeOutput struct { // The device name. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . Device *string // The ID of the instance. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . 
InstanceId *string - // The service principal of Amazon Web Services service that owns the underlying - // instance to which the volume is attached. + // The service principal of the Amazon Web Services service that owns the + // underlying resource to which the volume is attached. // - // This parameter is returned only for volumes that are attached to Fargate tasks. + // This parameter is returned only for volumes that are attached to Amazon Web + // Services-managed resources. InstanceOwningService *string // The attachment state of the volume. @@ -209,6 +215,36 @@ func (c *Client) addOperationAttachVolumeMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go index 12b106367..aa72e47be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go @@ -155,6 +155,36 @@ func (c *Client) addOperationAttachVpnGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go index 66d3cbf1f..4934aa586 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go @@ -173,6 +173,36 @@ func (c *Client) 
addOperationAuthorizeClientVpnIngressMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go index 4519480d0..446882858 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go @@ -192,6 +192,36 @@ func (c *Client) addOperationAuthorizeSecurityGroupEgressMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go index a0ebe5ede..6715e78f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go @@ -237,6 +237,36 @@ func (c *Client) addOperationAuthorizeSecurityGroupIngressMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); 
err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go index 532ab269f..e6cafdedf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go @@ -159,6 +159,36 @@ func (c *Client) addOperationBundleInstanceMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go index 7393864c1..d1f33e5ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go @@ -144,6 +144,36 @@ func (c *Client) addOperationCancelBundleTaskMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go index ae5819686..12b7fe131 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go @@ -163,6 +163,36 @@ func (c *Client) addOperationCancelCapacityReservationMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go index 9e9a61dc2..7ac719f71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go @@ -155,6 +155,36 @@ func (c *Client) addOperationCancelCapacityReservationFleetsMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go index 663dfefff..9e504af33 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go @@ -144,6 +144,36 @@ func (c *Client) addOperationCancelConversionTaskMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return 
err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go index f720038a4..4ac1091eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go @@ -148,6 +148,36 @@ func (c *Client) addOperationCancelDeclarativePoliciesReportMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go index 7be8de075..ce03a758d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go @@ -135,6 +135,36 @@ func (c *Client) addOperationCancelExportTaskMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go index 6f1767050..aaa246d1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go @@ -144,6 +144,36 @@ func (c *Client) addOperationCancelImageLaunchPermissionMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go index 935fa60e6..545c81685 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go @@ -145,6 +145,36 @@ func (c *Client) addOperationCancelImportTaskMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go index 8c410d673..e90d4be2d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go @@ -143,6 +143,36 @@ func (c *Client) addOperationCancelReservedInstancesListingMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go index c95478771..8bdde7176 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go @@ -171,6 +171,36 @@ func (c *Client) addOperationCancelSpotFleetRequestsMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go index 4212b5803..a7b758a3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go @@ -147,6 +147,36 @@ func (c *Client) addOperationCancelSpotInstanceRequestsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go index 059090583..3bf8d884a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go @@ -153,6 +153,36 @@ func (c *Client) addOperationConfirmProductInstanceMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go index 7d227a013..a1a7eb339 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go @@ -158,6 +158,36 @@ func (c *Client) addOperationCopyFpgaImageMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } 
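The addIntercept* registrations added above (and repeated for every EC2 operation in this patch) wire the SDK's new interceptor hooks into each operation's middleware stack; the public surface of the generated client is unchanged. A minimal sketch of calling one of the operations touched above with the updated client, assuming standard aws-sdk-go-v2 setup and a placeholder spot request ID:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	ctx := context.Background()

	// Region and credentials are resolved from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// The interceptor middleware added by this patch runs inside this call;
	// the operation's request and response types are unchanged.
	out, err := client.CancelSpotInstanceRequests(ctx, &ec2.CancelSpotInstanceRequestsInput{
		SpotInstanceRequestIds: []string{"sir-0123456789example"}, // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, req := range out.CancelledSpotInstanceRequests {
		log.Printf("cancelled %s", aws.ToString(req.SpotInstanceRequestId))
	}
}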
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go index 74dfd2936..5cd5515d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go @@ -11,24 +11,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Initiates an AMI copy operation. You can copy an AMI from one Region to -// another, or from a Region to an Outpost. You can't copy an AMI from an Outpost -// to a Region, from one Outpost to another, or within the same Outpost. To copy an -// AMI to another partition, see [CreateStoreImageTask]. +// Initiates an AMI copy operation. You must specify the source AMI ID and both +// the source and destination locations. The copy operation must be initiated in +// the destination Region. // -// When you copy an AMI from one Region to another, the destination Region is the -// current Region. +// CopyImage supports the following source to destination copies: // -// When you copy an AMI from a Region to an Outpost, specify the ARN of the -// Outpost as the destination. Backing snapshots copied to an Outpost are encrypted -// by default using the default encryption key for the Region or the key that you -// specify. Outposts do not support unencrypted snapshots. +// - Region to Region // -// For information about the prerequisites when copying an AMI, see [Copy an AMI] in the Amazon -// EC2 User Guide. +// - Region to Outpost +// +// - Parent Region to Local Zone +// +// - Local Zone to parent Region +// +// - Between Local Zones with the same parent Region (only supported for certain +// Local Zones) +// +// CopyImage does not support the following source to destination copies: +// +// - Local Zone to non-parent Regions +// +// - Between Local Zones with different parent Regions +// +// - Local Zone to Outpost +// +// - Outpost to Local Zone +// +// - Outpost to Region +// +// - Between Outposts +// +// - Within same Outpost +// +// - Cross-partition copies (use [CreateStoreImageTask]instead) +// +// Destination specification +// +// - Region to Region: The destination Region is the Region in which you +// initiate the copy operation. +// +// - Region to Outpost: Specify the destination using the DestinationOutpostArn +// parameter (the ARN of the Outpost) +// +// - Region to Local Zone, and Local Zone to Local Zone copies: Specify the +// destination using the DestinationAvailabilityZone parameter (the name of the +// destination Local Zone) or DestinationAvailabilityZoneId parameter (the ID of +// the destination Local Zone). +// +// Snapshot encryption +// +// - Region to Outpost: Backing snapshots copied to an Outpost are encrypted by +// default using the default encryption key for the Region or the key that you +// specify. Outposts do not support unencrypted snapshots. +// +// - Region to Local Zone, and Local Zone to Local Zone: Not all Local Zones +// require encrypted snapshots. In Local Zones that require encrypted snapshots, +// backing snapshots are automatically encrypted during copy. In Local Zones where +// encryption is not required, snapshots retain their original encryption state +// (encrypted or unencrypted) by default. +// +// For more information, including the required permissions for copying an AMI, +// see [Copy an Amazon EC2 AMI]in the Amazon EC2 User Guide. 
// // [CreateStoreImageTask]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html -// [Copy an AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html +// [Copy an Amazon EC2 AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html func (c *Client) CopyImage(ctx context.Context, params *CopyImageInput, optFns ...func(*Options)) (*CopyImageOutput, error) { if params == nil { params = &CopyImageInput{} @@ -47,7 +94,7 @@ func (c *Client) CopyImage(ctx context.Context, params *CopyImageInput, optFns . // Contains the parameters for CopyImage. type CopyImageInput struct { - // The name of the new AMI in the destination Region. + // The name of the new AMI. // // This member is required. Name *string @@ -63,14 +110,14 @@ type CopyImageInput struct { SourceRegion *string // Unique, case-sensitive identifier you provide to ensure idempotency of the - // request. For more information, see [Ensuring idempotency]in the Amazon EC2 API Reference. + // request. For more information, see [Ensuring idempotency in Amazon EC2 API requests]in the Amazon EC2 API Reference. // - // [Ensuring idempotency]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html + // [Ensuring idempotency in Amazon EC2 API requests]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html ClientToken *string - // Indicates whether to include your user-defined AMI tags when copying the AMI. + // Specifies whether to copy your user-defined AMI tags to the new AMI. // - // The following tags will not be copied: + // The following tags are not copied: // // - System tags (prefixed with aws: ) // @@ -80,17 +127,33 @@ type CopyImageInput struct { // Default: Your user-defined AMI tags are not copied. CopyImageTags *bool - // A description for the new AMI in the destination Region. + // A description for the new AMI. Description *string - // The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only - // specify this parameter when copying an AMI from an Amazon Web Services Region to - // an Outpost. The AMI must be in the Region of the destination Outpost. You cannot - // copy an AMI from an Outpost to a Region, from one Outpost to another, or within - // the same Outpost. + // The Local Zone for the new AMI (for example, cn-north-1-pkx-1a ). + // + // Only one of DestinationAvailabilityZone , DestinationAvailabilityZoneId , or + // DestinationOutpostArn can be specified. + DestinationAvailabilityZone *string + + // The ID of the Local Zone for the new AMI (for example, cnn1-pkx1-az1 ). + // + // Only one of DestinationAvailabilityZone , DestinationAvailabilityZoneId , or + // DestinationOutpostArn can be specified. + DestinationAvailabilityZoneId *string + + // The Amazon Resource Name (ARN) of the Outpost for the new AMI. + // + // Only specify this parameter when copying an AMI from an Amazon Web Services + // Region to an Outpost. The AMI must be in the Region of the destination Outpost. + // You can't copy an AMI from an Outpost to a Region, from one Outpost to another, + // or within the same Outpost. + // + // For more information, see [Copy AMIs from an Amazon Web Services Region to an Outpost] in the Amazon EBS User Guide. + // + // Only one of DestinationAvailabilityZone , DestinationAvailabilityZoneId , or + // DestinationOutpostArn can be specified.
+ // // [Copy AMIs from an Amazon Web Services Region to an Outpost]: https://docs.aws.amazon.com/ebs/latest/userguide/snapshots-outposts.html#copy-amis DestinationOutpostArn *string @@ -100,12 +163,12 @@ type CopyImageInput struct { // UnauthorizedOperation . DryRun *bool - // Specifies whether the destination snapshots of the copied image should be - // encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot - // create an unencrypted copy of an encrypted snapshot. The default KMS key for - // Amazon EBS is used unless you specify a non-default Key Management Service (KMS) - // KMS key using KmsKeyId . For more information, see [Use encryption with EBS-backed AMIs] in the Amazon EC2 User - // Guide. + // Specifies whether to encrypt the snapshots of the copied image. + // + // You can encrypt a copy of an unencrypted snapshot, but you cannot create an + // unencrypted copy of an encrypted snapshot. The default KMS key for Amazon EBS is + // used unless you specify a non-default Key Management Service (KMS) KMS key using + // KmsKeyId . For more information, see [Use encryption with EBS-backed AMIs] in the Amazon EC2 User Guide. // // [Use encryption with EBS-backed AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html Encrypted *bool @@ -139,14 +202,19 @@ type CopyImageInput struct { // Specify a completion duration, in 15 minute increments, to initiate a // time-based AMI copy. The specified completion duration applies to each of the // snapshots associated with the AMI. Each snapshot associated with the AMI will be - // completed within the specified completion duration, regardless of their size. + // completed within the specified completion duration, with copy throughput + // automatically adjusted for each snapshot based on its size to meet the timing + // target. // // If you do not specify a value, the AMI copy operation is completed on a // best-effort basis. // - // For more information, see [Time-based copies]. + // This parameter is not supported when copying an AMI to or from a Local Zone, or + // to an Outpost. // - // [Time-based copies]: https://docs.aws.amazon.com/ebs/latest/userguide/time-based-copies.html + // For more information, see [Time-based copies for Amazon EBS snapshots and EBS-backed AMIs]. + // + // [Time-based copies for Amazon EBS snapshots and EBS-backed AMIs]: https://docs.aws.amazon.com/ebs/latest/userguide/time-based-copies.html SnapshotCopyCompletionDurationMinutes *int64 // The tags to apply to the new AMI and new snapshots. 
You can tag the AMI, the @@ -270,6 +338,36 @@ func (c *Client) addOperationCopyImageMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go index 463203491..04748f31e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go @@ -13,12 +13,20 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. -// You can copy a snapshot within the same Region, from one Region to another, or -// from a Region to an Outpost. You can't copy a snapshot from an Outpost to a -// Region, from one Outpost to another, or within the same Outpost. +// Creates an exact copy of an Amazon EBS snapshot. // -// You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). +// The location of the source snapshot determines whether you can copy it or not, +// and the allowed destinations for the snapshot copy. +// +// - If the source snapshot is in a Region, you can copy it within that Region, +// to another Region, to an Outpost associated with that Region, or to a Local Zone +// in that Region. +// +// - If the source snapshot is in a Local Zone, you can copy it within that +// Local Zone, to another Local Zone in the same zone group, or to the parent +// Region of the Local Zone. +// +// - If the source snapshot is on an Outpost, you can't copy it. // // When copying snapshots to a Region, copies of encrypted EBS snapshots remain // encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable // @@ -32,8 +40,8 @@ import ( // request using KmsKeyId. Outposts do not support unencrypted snapshots. For more // information, see [Amazon EBS local snapshots on Outposts]in the Amazon EBS User Guide. // -// Snapshots created by copying another snapshot have an arbitrary volume ID that -// should not be used for any purpose. +// Snapshot copies have an arbitrary source volume ID. Do not use this volume ID +// for any purpose. // // For more information, see [Copy an Amazon EBS snapshot] in the Amazon EBS User Guide. // @@ -66,6 +74,8 @@ type CopySnapshotInput struct { // This member is required. SourceSnapshotId *string + // Not supported when copying snapshots to or from Local Zones or Outposts.
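The CopyImage changes above add Local Zone destinations for AMI copies through the new DestinationAvailabilityZone and DestinationAvailabilityZoneId parameters, only one of which may be set together with DestinationOutpostArn. A short sketch of a Region-to-Local-Zone copy with the updated input follows (the CopySnapshot parameter documentation continues after it); the client is assumed to be constructed as in the earlier sketch, and every identifier and zone name is a placeholder:

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

// copyImageToLocalZone copies an AMI from its source Region to a Local Zone in
// the destination Region using the new DestinationAvailabilityZone parameter.
// Only one of DestinationAvailabilityZone, DestinationAvailabilityZoneId, or
// DestinationOutpostArn may be specified.
func copyImageToLocalZone(ctx context.Context, client *ec2.Client) error {
	out, err := client.CopyImage(ctx, &ec2.CopyImageInput{
		Name:                        aws.String("my-ami-copy"),           // placeholder name
		SourceImageId:               aws.String("ami-0123456789example"), // placeholder AMI ID
		SourceRegion:                aws.String("us-west-2"),             // placeholder source Region
		DestinationAvailabilityZone: aws.String("us-west-2-lax-1a"),      // placeholder Local Zone name
	})
	if err != nil {
		return err
	}
	log.Printf("copy started, new AMI: %s", aws.ToString(out.ImageId))
	return nil
}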
+ // + // Specify a completion duration, in 15 minute increments, to initiate a + // time-based snapshot copy. Time-based snapshot copy operations complete within + // the specified duration. For more information, see [Time-based copies]. @@ -79,11 +89,14 @@ type CopySnapshotInput struct { // A description for the EBS snapshot. Description *string + // The Local Zone, for example, cn-north-1-pkx-1a to which to copy the snapshot. + // + // Only supported when copying a snapshot to a Local Zone. + DestinationAvailabilityZone *string + // The Amazon Resource Name (ARN) of the Outpost to which to copy the snapshot. - // Only specify this parameter when copying a snapshot from an Amazon Web Services - // Region to an Outpost. The snapshot must be in the Region for the destination - // Outpost. You cannot copy a snapshot from an Outpost to a Region, from one - // Outpost to another, or within the same Outpost. + // + // Only supported when copying a snapshot to an Outpost. // // For more information, see [Copy snapshots from an Amazon Web Services Region to an Outpost] in the Amazon EBS User Guide. // @@ -257,6 +270,36 @@ func (c *Client) addOperationCopySnapshotMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go index 5c36aae99..b32ae0c5d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go @@ -55,9 +55,9 @@ type CreateCapacityReservationInput struct { // The number of instances for which to reserve capacity. // // You can request future-dated Capacity Reservations for an instance count with a - // minimum of 100 vCPUs. For example, if you request a future-dated Capacity - // Reservation for m5.xlarge instances, you must request at least 25 instances (25 - // * m5.xlarge = 100 vCPUs). + // minimum of 64 vCPUs. For example, if you request a future-dated Capacity + // Reservation for m5.xlarge instances, you must request at least 16 instances (16 + // * m5.xlarge = 64 vCPUs). // // Valid range: 1 - 1000 // @@ -72,7 +72,7 @@ type CreateCapacityReservationInput struct { // The instance type for which to reserve capacity. // // You can request future-dated Capacity Reservations for instance types in the C, - // M, R, I, and T instance families only. + // M, R, I, T, and G instance families only.
// // For more information, see [Instance types] in the Amazon EC2 User Guide. // @@ -322,6 +322,36 @@ func (c *Client) addOperationCreateCapacityReservationMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go index 8d2918f6e..84af9cdd2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go @@ -170,6 +170,36 @@ func (c *Client) addOperationCreateCapacityReservationBySplittingMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go index f4a9fbacf..9c3e2aab6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go @@ -242,6 +242,36 @@ func (c *Client) addOperationCreateCapacityReservationFleetMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return 
err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go index 9b4d96802..20b50e54e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go @@ -157,6 +157,36 @@ func (c *Client) addOperationCreateCarrierGatewayMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go index 6704b3517..29816345f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go @@ -36,16 +36,6 @@ type CreateClientVpnEndpointInput struct { // This member is required. AuthenticationOptions []types.ClientVpnAuthenticationRequest - // The IPv4 address range, in CIDR notation, from which to assign client IP - // addresses. The address range cannot overlap with the local CIDR of the VPC in - // which the associated subnet is located, or the routes that you add manually. The - // address range cannot be changed after the Client VPN endpoint has been created. - // Client CIDR range must have a size of at least /22 and must not be greater than - // /12. - // - // This member is required. - ClientCidrBlock *string - // Information about the client connection logging options. // // If you enable client connection logging, data about client connections is sent @@ -69,6 +59,14 @@ type CreateClientVpnEndpointInput struct { // This member is required. ServerCertificateArn *string + // The IPv4 address range, in CIDR notation, from which to assign client IP + // addresses. 
The address range cannot overlap with the local CIDR of the VPC in + // which the associated subnet is located, or the routes that you add manually. The + // address range cannot be changed after the Client VPN endpoint has been created. + // Client CIDR range must have a size of at least /22 and must not be greater than + // /12. + ClientCidrBlock *string + // The options for managing connection authorization for new client connections. ClientConnectOptions *types.ClientConnectOptions @@ -114,6 +112,12 @@ type CreateClientVpnEndpointInput struct { // UnauthorizedOperation . DryRun *bool + // The IP address type for the Client VPN endpoint. Valid values are ipv4 + // (default) for IPv4 addressing only, ipv6 for IPv6 addressing only, or dual-stack + // for both IPv4 and IPv6 addressing. When set to dual-stack, clients can connect + // to the endpoint using either IPv4 or IPv6 addresses.. + EndpointIpAddressType types.EndpointIpAddressType + // The IDs of one or more security groups to apply to the target network. You must // also specify the ID of the VPC that contains the security groups. SecurityGroupIds []string @@ -143,6 +147,12 @@ type CreateClientVpnEndpointInput struct { // The tags to apply to the Client VPN endpoint during creation. TagSpecifications []types.TagSpecification + // The IP address type for traffic within the Client VPN tunnel. Valid values are + // ipv4 (default) for IPv4 traffic only, ipv6 for IPv6 addressing only, or + // dual-stack for both IPv4 and IPv6 traffic. When set to dual-stack , clients can + // access both IPv4 and IPv6 resources through the VPN . + TrafficIpAddressType types.TrafficIpAddressType + // The transport protocol to be used by the VPN session. // // Default value: udp @@ -271,6 +281,36 @@ func (c *Client) addOperationCreateClientVpnEndpointMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go index 39ead32b0..89601e317 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go @@ -179,6 +179,36 @@ func (c *Client) addOperationCreateClientVpnRouteMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return 
err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go index bd52562b7..89aa0ebb0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go @@ -147,6 +147,36 @@ func (c *Client) addOperationCreateCoipCidrMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go index 9858dd9e1..be07fe962 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go @@ -145,6 +145,36 @@ func (c *Client) addOperationCreateCoipPoolMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go index 3a4012aa4..22bb71554 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go @@ -82,10 +82,10 @@ type CreateCustomerGatewayInput struct { // UnauthorizedOperation . DryRun *bool - // IPv4 address for the customer gateway device's outside interface. The address + // The IP address for the customer gateway device's outside interface. The address // must be static. If OutsideIpAddressType in your VPN connection options is set // to PrivateIpv4 , you can use an RFC6598 or RFC1918 private IPv4 address. If - // OutsideIpAddressType is set to PublicIpv4 , you can use a public IPv4 address. + // OutsideIpAddressType is set to Ipv6 , you can use an IPv6 address. IpAddress *string // This member has been deprecated. The Internet-routable IP address for the @@ -198,6 +198,36 @@ func (c *Client) addOperationCreateCustomerGatewayMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go index 5d7ef24ec..d08e53403 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go @@ -35,9 +35,14 @@ type CreateDefaultSubnetInput struct { // The Availability Zone in which to create the default subnet. // - // This member is required. + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. AvailabilityZone *string + // The ID of the Availability Zone. + // + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. + AvailabilityZoneId *string + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . 
Otherwise, it is @@ -130,9 +135,6 @@ func (c *Client) addOperationCreateDefaultSubnetMiddlewares(stack *middleware.St if err = addCredentialSource(stack, options); err != nil { return err } - if err = addOpCreateDefaultSubnetValidationMiddleware(stack); err != nil { - return err - } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDefaultSubnet(options.Region), middleware.Before); err != nil { return err } @@ -151,6 +153,36 @@ func (c *Client) addOperationCreateDefaultSubnetMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go index 37d297ff1..e86792bdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go @@ -142,6 +142,36 @@ func (c *Client) addOperationCreateDefaultVpcMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDelegateMacVolumeOwnershipTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDelegateMacVolumeOwnershipTask.go new file mode 100644 index 000000000..51bcea413 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDelegateMacVolumeOwnershipTask.go @@ -0,0 +1,269 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Delegates ownership of the Amazon EBS root volume for an Apple silicon Mac +// instance to an administrative user. +func (c *Client) CreateDelegateMacVolumeOwnershipTask(ctx context.Context, params *CreateDelegateMacVolumeOwnershipTaskInput, optFns ...func(*Options)) (*CreateDelegateMacVolumeOwnershipTaskOutput, error) { + if params == nil { + params = &CreateDelegateMacVolumeOwnershipTaskInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDelegateMacVolumeOwnershipTask", params, optFns, c.addOperationCreateDelegateMacVolumeOwnershipTaskMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDelegateMacVolumeOwnershipTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateDelegateMacVolumeOwnershipTaskInput struct { + + // The ID of the Amazon EC2 Mac instance. + // + // This member is required. + InstanceId *string + + // Specifies the following credentials: + // + // - Internal disk administrative user + // + // - Username - Only the default administrative user ( aws-managed-user ) is + // supported and it is used by default. You can't specify a different + // administrative user. + // + // - Password - If you did not change the default password for aws-managed-user , + // specify the default password, which is blank. Otherwise, specify your password. + // + // - Amazon EBS root volume administrative user + // + // - Username - If you did not change the default administrative user, specify + // ec2-user . Otherwise, specify the username for your administrative user. + // + // - Password - Specify the password for the administrative user. + // + // The credentials must be specified in the following JSON format: + // + // { "internalDiskPassword":"internal-disk-admin_password", + // "rootVolumeUsername":"root-volume-admin_username", + // "rootVolumepassword":"root-volume-admin_password" } + // + // This member is required. + MacCredentials *string + + // Unique, case-sensitive identifier that you provide to ensure the idempotency of + // the request. For more information, see [Ensuring Idempotency]. + // + // [Ensuring Idempotency]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html + ClientToken *string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // The tags to assign to the volume ownership delegation task. + TagSpecifications []types.TagSpecification + + noSmithyDocumentSerde +} + +type CreateDelegateMacVolumeOwnershipTaskOutput struct { + + // Information about the volume ownership delegation task. + MacModificationTask *types.MacModificationTask + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDelegateMacVolumeOwnershipTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpCreateDelegateMacVolumeOwnershipTask{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateDelegateMacVolumeOwnershipTask{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDelegateMacVolumeOwnershipTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateDelegateMacVolumeOwnershipTaskMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateDelegateMacVolumeOwnershipTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDelegateMacVolumeOwnershipTask(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateDelegateMacVolumeOwnershipTask struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateDelegateMacVolumeOwnershipTask) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateDelegateMacVolumeOwnershipTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateDelegateMacVolumeOwnershipTaskInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateDelegateMacVolumeOwnershipTaskInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateDelegateMacVolumeOwnershipTaskMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateDelegateMacVolumeOwnershipTask{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateDelegateMacVolumeOwnershipTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDelegateMacVolumeOwnershipTask", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go index 72229495a..442f8821d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go @@ -189,6 +189,36 @@ func (c *Client) addOperationCreateDhcpOptionsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if 
err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go index c2347e585..93dad2f99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go @@ -158,6 +158,36 @@ func (c *Client) addOperationCreateEgressOnlyInternetGatewayMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go index 3301784f6..ef4a10052 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go @@ -248,6 +248,36 @@ func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go index 64582576c..6b95610ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go @@ -261,6 +261,36 @@ func (c *Client) addOperationCreateFlowLogsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + 
if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go index 764d38bf4..d5a6a1625 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go @@ -173,6 +173,36 @@ func (c *Client) addOperationCreateFpgaImageMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go index 38fe4ff75..cb0e0eed3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go @@ -19,9 +19,18 @@ import ( // mapping information for those volumes. When you launch an instance from this new // AMI, the instance automatically launches with those additional volumes. // -// For more information, see [Create an Amazon EBS-backed Linux AMI] in the Amazon Elastic Compute Cloud User Guide. +// The location of the source instance determines where you can create the +// snapshots of the AMI: // -// [Create an Amazon EBS-backed Linux AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html +// - If the source instance is in a Region, you must create the snapshots in the +// same Region as the instance. +// +// - If the source instance is in a Local Zone, you can create the snapshots in +// the same Local Zone or in its parent Region. 
+// +// For more information, see [Create an Amazon EBS-backed AMI] in the Amazon Elastic Compute Cloud User Guide. +// +// [Create an Amazon EBS-backed AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html func (c *Client) CreateImage(ctx context.Context, params *CreateImageInput, optFns ...func(*Options)) (*CreateImageOutput, error) { if params == nil { params = &CreateImageInput{} @@ -64,7 +73,8 @@ type CreateImageInput struct { // - You can't modify the encryption status of existing volumes or snapshots. To // create an AMI with volumes or snapshots that have a different encryption status // (for example, where the source volume and snapshots are unencrypted, and you - // want to create an AMI with encrypted volumes or snapshots), use the CopyImageaction. + // want to create an AMI with encrypted volumes or snapshots), copy the image + // instead. // // - The only option that can be changed for existing mappings or snapshots is // DeleteOnTermination . @@ -95,6 +105,20 @@ type CreateImageInput struct { // Default: false NoReboot *bool + // Only supported for instances in Local Zones. If the source instance is not in a + // Local Zone, omit this parameter. + // + // The Amazon S3 location where the snapshots will be stored. + // + // - To create local snapshots in the same Local Zone as the source instance, + // specify local . + // + // - To create regional snapshots in the parent Region of the Local Zone, + // specify regional or omit this parameter. + // + // Default: regional + SnapshotLocation types.SnapshotLocationEnum + // The tags to apply to the AMI and snapshots on creation. You can tag the AMI, // the snapshots, or both. // @@ -214,6 +238,36 @@ func (c *Client) addOperationCreateImageMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImageUsageReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImageUsageReport.go new file mode 100644 index 000000000..7424c37e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImageUsageReport.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a report that shows how your image is used across other Amazon Web +// Services accounts. 
The report provides visibility into which accounts are using +// the specified image, and how many resources (EC2 instances or launch templates) +// are referencing it. +// +// For more information, see [View your AMI usage] in the Amazon EC2 User Guide. +// +// [View your AMI usage]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/your-ec2-ami-usage.html +func (c *Client) CreateImageUsageReport(ctx context.Context, params *CreateImageUsageReportInput, optFns ...func(*Options)) (*CreateImageUsageReportOutput, error) { + if params == nil { + params = &CreateImageUsageReportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateImageUsageReport", params, optFns, c.addOperationCreateImageUsageReportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateImageUsageReportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateImageUsageReportInput struct { + + // The ID of the image to report on. + // + // This member is required. + ImageId *string + + // The resource types to include in the report. + // + // This member is required. + ResourceTypes []types.ImageUsageResourceTypeRequest + + // The Amazon Web Services account IDs to include in the report. To include all + // accounts, omit this parameter. + AccountIds []string + + // A unique, case-sensitive identifier that you provide to ensure idempotency of + // the request. + ClientToken *string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // The tags to apply to the report on creation. The ResourceType must be set to + // image-usage-report ; any other value will cause the report creation to fail. + // + // To tag a report after it has been created, see [CreateTags]. + // + // [CreateTags]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html + TagSpecifications []types.TagSpecification + + noSmithyDocumentSerde +} + +type CreateImageUsageReportOutput struct { + + // The ID of the report. + ReportId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateImageUsageReportMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpCreateImageUsageReport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateImageUsageReport{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateImageUsageReport"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateImageUsageReportMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateImageUsageReportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateImageUsageReport(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateImageUsageReport struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateImageUsageReport) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateImageUsageReport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateImageUsageReportInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateImageUsageReportInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateImageUsageReportMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateImageUsageReport{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateImageUsageReport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateImageUsageReport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go index 477a38ac3..1edf78096 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go @@ -14,10 +14,10 @@ import ( // Creates an EC2 Instance Connect Endpoint. // // An EC2 Instance Connect Endpoint allows you to connect to an instance, without -// requiring the instance to have a public IPv4 address. For more information, see [Connect to your instances without requiring a public IPv4 address using EC2 Instance Connect Endpoint] -// in the Amazon EC2 User Guide. +// requiring the instance to have a public IPv4 or public IPv6 address. For more +// information, see [Connect to your instances using EC2 Instance Connect Endpoint]in the Amazon EC2 User Guide. 
// -// [Connect to your instances without requiring a public IPv4 address using EC2 Instance Connect Endpoint]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Connect-using-EC2-Instance-Connect-Endpoint.html +// [Connect to your instances using EC2 Instance Connect Endpoint]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Connect-using-EC2-Instance-Connect-Endpoint.html func (c *Client) CreateInstanceConnectEndpoint(ctx context.Context, params *CreateInstanceConnectEndpointInput, optFns ...func(*Options)) (*CreateInstanceConnectEndpointOutput, error) { if params == nil { params = &CreateInstanceConnectEndpointInput{} @@ -50,6 +50,21 @@ type CreateInstanceConnectEndpointInput struct { // UnauthorizedOperation . DryRun *bool + // The IP address type of the endpoint. + // + // If no value is specified, the default value is determined by the IP address + // type of the subnet: + // + // - dualstack - If the subnet has both IPv4 and IPv6 CIDRs + // + // - ipv4 - If the subnet has only IPv4 CIDRs + // + // - ipv6 - If the subnet has only IPv6 CIDRs + // + // PreserveClientIp is only supported on IPv4 EC2 Instance Connect Endpoints. To + // use PreserveClientIp , the value for IpAddressType must be ipv4 . + IpAddressType types.IpAddressType + // Indicates whether the client IP address is preserved as the source. The // following are the possible values. // @@ -57,6 +72,9 @@ type CreateInstanceConnectEndpointInput struct { // // - false - Use the network interface IP address as the source. // + // PreserveClientIp is only supported on IPv4 EC2 Instance Connect Endpoints. To + // use PreserveClientIp , the value for IpAddressType must be ipv4 . + // // Default: false PreserveClientIp *bool @@ -177,6 +195,36 @@ func (c *Client) addOperationCreateInstanceConnectEndpointMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go index de9609807..5b87b1bb9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go @@ -192,6 +192,36 @@ func (c *Client) addOperationCreateInstanceEventWindowMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err 
+ } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go index b3387ca02..19fb0f703 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go @@ -159,6 +159,36 @@ func (c *Client) addOperationCreateInstanceExportTaskMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go index 38aa1c36c..8fd5801d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go @@ -142,6 +142,36 @@ func (c *Client) addOperationCreateInternetGatewayMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go index a804b0778..063ee7f8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go @@ -196,6 +196,36 @@ func (c *Client) addOperationCreateIpamMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go index 69ccb5fd8..ea54ad796 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go @@ -158,6 +158,36 @@ func (c *Client) addOperationCreateIpamExternalResourceVerificationTokenMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go index d03c7cb4f..7762dbe6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go @@ -253,6 +253,36 @@ func (c *Client) addOperationCreateIpamPoolMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go index dea05d7b6..860218036 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go @@ -154,6 +154,36 @@ func (c *Client) addOperationCreateIpamResourceDiscoveryMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go index d8e63b6eb..8813544e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go @@ -169,6 +169,36 @@ func (c *Client) addOperationCreateIpamScopeMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go index 15adb7061..4d34e33c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go @@ -190,6 +190,36 @@ func (c *Client) addOperationCreateKeyPairMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go index f8b630f75..d18f0c9d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go @@ -193,6 +193,36 @@ func (c *Client) addOperationCreateLaunchTemplateMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go index 6fc79a0ac..9d118c872 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go @@ -212,6 +212,36 @@ func (c *Client) addOperationCreateLaunchTemplateVersionMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go index d953f96c3..45e12b83d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go @@ -162,6 +162,36 @@ func (c *Client) addOperationCreateLocalGatewayRouteMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go index 870ed9970..f67456676 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go @@ -148,6 +148,36 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go index 82be8858a..8d2f9d0aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go @@ -152,6 +152,36 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableVirtualInterfaceGroupAs if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go index 
31a8a3f9e..27ddf8fab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go @@ -150,6 +150,36 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableVpcAssociationMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterface.go index f0624808f..61816b632 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterface.go @@ -176,6 +176,36 @@ func (c *Client) addOperationCreateLocalGatewayVirtualInterfaceMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterfaceGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterfaceGroup.go index 245421c79..38fd24532 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterfaceGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayVirtualInterfaceGroup.go @@ -152,6 +152,36 @@ func (c *Client) addOperationCreateLocalGatewayVirtualInterfaceGroupMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, 
options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateMacSystemIntegrityProtectionModificationTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateMacSystemIntegrityProtectionModificationTask.go new file mode 100644 index 000000000..2d80febc6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateMacSystemIntegrityProtectionModificationTask.go @@ -0,0 +1,312 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a System Integrity Protection (SIP) modification task to configure the +// SIP settings for an x86 Mac instance or Apple silicon Mac instance. For more +// information, see [Configure SIP for Amazon EC2 instances]in the Amazon EC2 User Guide. +// +// When you configure the SIP settings for your instance, you can either enable or +// disable all SIP settings, or you can specify a custom SIP configuration that +// selectively enables or disables specific SIP settings. +// +// If you implement a custom configuration, [connect to the instance and verify the settings] to ensure that your requirements are +// properly implemented and functioning as intended. +// +// SIP configurations might change with macOS updates. We recommend that you +// review custom SIP settings after any macOS version upgrade to ensure continued +// compatibility and proper functionality of your security configurations. +// +// To enable or disable all SIP settings, use the +// MacSystemIntegrityProtectionStatus parameter only. For example, to enable all +// SIP settings, specify the following: +// +// - MacSystemIntegrityProtectionStatus=enabled +// +// To specify a custom configuration that selectively enables or disables specific +// SIP settings, use the MacSystemIntegrityProtectionStatus parameter to enable or +// disable all SIP settings, and then use the +// MacSystemIntegrityProtectionConfiguration parameter to specify exceptions. In +// this case, the exceptions you specify for +// MacSystemIntegrityProtectionConfiguration override the value you specify for +// MacSystemIntegrityProtectionStatus. 
For example, to enable all SIP settings, +// except NvramProtections , specify the following: +// +// - MacSystemIntegrityProtectionStatus=enabled +// +// - MacSystemIntegrityProtectionConfigurationRequest "NvramProtections=disabled" +// +// [Configure SIP for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/mac-sip-settings.html#mac-sip-configure +// [connect to the instance and verify the settings]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/mac-sip-settings.html#mac-sip-check-settings +func (c *Client) CreateMacSystemIntegrityProtectionModificationTask(ctx context.Context, params *CreateMacSystemIntegrityProtectionModificationTaskInput, optFns ...func(*Options)) (*CreateMacSystemIntegrityProtectionModificationTaskOutput, error) { + if params == nil { + params = &CreateMacSystemIntegrityProtectionModificationTaskInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateMacSystemIntegrityProtectionModificationTask", params, optFns, c.addOperationCreateMacSystemIntegrityProtectionModificationTaskMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateMacSystemIntegrityProtectionModificationTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateMacSystemIntegrityProtectionModificationTaskInput struct { + + // The ID of the Amazon EC2 Mac instance. + // + // This member is required. + InstanceId *string + + // Specifies the overall SIP status for the instance. To enable all SIP settings, + // specify enabled . To disable all SIP settings, specify disabled . + // + // This member is required. + MacSystemIntegrityProtectionStatus types.MacSystemIntegrityProtectionSettingStatus + + // Unique, case-sensitive identifier that you provide to ensure the idempotency of + // the request. For more information, see [Ensuring Idempotency]. + // + // [Ensuring Idempotency]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html + ClientToken *string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // [Apple silicon Mac instances only] Specifies the following credentials: + // + // - Internal disk administrative user + // + // - Username - Only the default administrative user ( aws-managed-user ) is + // supported and it is used by default. You can't specify a different + // administrative user. + // + // - Password - If you did not change the default password for aws-managed-user , + // specify the default password, which is blank. Otherwise, specify your password. + // + // - Amazon EBS root volume administrative user + // + // - Username - If you did not change the default administrative user, specify + // ec2-user . Otherwise, specify the username for your administrative user. + // + // - Password - Specify the password for the administrative user. + // + // The credentials must be specified in the following JSON format: + // + // { "internalDiskPassword":"internal-disk-admin_password", + // "rootVolumeUsername":"root-volume-admin_username", + // "rootVolumepassword":"root-volume-admin_password" } + MacCredentials *string + + // Specifies the overrides to selectively enable or disable individual SIP + // settings. 
The individual settings you specify here override the overall SIP + // status you specify for MacSystemIntegrityProtectionStatus. + MacSystemIntegrityProtectionConfiguration *types.MacSystemIntegrityProtectionConfigurationRequest + + // Specifies tags to apply to the SIP modification task. + TagSpecifications []types.TagSpecification + + noSmithyDocumentSerde +} + +type CreateMacSystemIntegrityProtectionModificationTaskOutput struct { + + // Information about the SIP modification task. + MacModificationTask *types.MacModificationTask + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateMacSystemIntegrityProtectionModificationTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpCreateMacSystemIntegrityProtectionModificationTask{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateMacSystemIntegrityProtectionModificationTask{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMacSystemIntegrityProtectionModificationTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateMacSystemIntegrityProtectionModificationTaskMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateMacSystemIntegrityProtectionModificationTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMacSystemIntegrityProtectionModificationTask(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateMacSystemIntegrityProtectionModificationTask struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateMacSystemIntegrityProtectionModificationTask) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateMacSystemIntegrityProtectionModificationTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateMacSystemIntegrityProtectionModificationTaskInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateMacSystemIntegrityProtectionModificationTaskInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateMacSystemIntegrityProtectionModificationTaskMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateMacSystemIntegrityProtectionModificationTask{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateMacSystemIntegrityProtectionModificationTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateMacSystemIntegrityProtectionModificationTask", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go index 5c44553f4..1943c7e36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go @@ -175,6 +175,36 @@ func (c *Client) addOperationCreateManagedPrefixListMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go index 7d566dbcb..1327ccc43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go @@ -220,6 +220,36 @@ func (c *Client) addOperationCreateNatGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go index ed5637d43..2307d4812 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go @@ -163,6 +163,36 @@ func (c *Client) addOperationCreateNetworkAclMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != 
nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go index 314441d87..3db688c4f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go @@ -203,6 +203,36 @@ func (c *Client) addOperationCreateNetworkAclEntryMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go index 4e00ec7f0..f7f1b1075 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go @@ -167,6 +167,36 @@ func (c *Client) addOperationCreateNetworkInsightsAccessScopeMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go index 173e8f38f..97fd81646 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go @@ -191,6 +191,36 @@ func (c *Client) addOperationCreateNetworkInsightsPathMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go index 001085d14..faac80235 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go @@ -273,6 +273,36 @@ func (c *Client) addOperationCreateNetworkInterfaceMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go index 75b8ea79a..e3db24259 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go @@ -159,6 +159,36 @@ func (c *Client) addOperationCreateNetworkInterfacePermissionMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go index d8ad7048a..3162a9336 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreatePlacementGroupMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go index b1dcd7534..d197ee3ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go @@ -153,6 +153,36 @@ func (c *Client) addOperationCreatePublicIpv4PoolMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go index 28b6037da..52f63153c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go @@ -211,6 +211,36 @@ func (c *Client) addOperationCreateReplaceRootVolumeTaskMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go index c7f958109..97a8a54b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go @@ -183,6 +183,36 @@ func (c *Client) addOperationCreateReservedInstancesListingMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go index 3bb8fa328..caa68591c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go @@ 
-15,13 +15,13 @@ import ( // created by using [CreateStoreImageTask]. // // To use this API, you must have the required permissions. For more information, -// see [Permissions for storing and restoring AMIs using Amazon S3]in the Amazon EC2 User Guide. +// see [Permissions for storing and restoring AMIs using S3]in the Amazon EC2 User Guide. // -// For more information, see [Store and restore an AMI using Amazon S3] in the Amazon EC2 User Guide. +// For more information, see [Store and restore an AMI using S3] in the Amazon EC2 User Guide. // // [CreateStoreImageTask]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html -// [Store and restore an AMI using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html -// [Permissions for storing and restoring AMIs using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions +// [Store and restore an AMI using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html +// [Permissions for storing and restoring AMIs using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/work-with-ami-store-restore.html#ami-s3-permissions func (c *Client) CreateRestoreImageTask(ctx context.Context, params *CreateRestoreImageTaskInput, optFns ...func(*Options)) (*CreateRestoreImageTaskOutput, error) { if params == nil { params = &CreateRestoreImageTaskInput{} @@ -171,6 +171,36 @@ func (c *Client) addOperationCreateRestoreImageTaskMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go index 419037bf2..488bf7797 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go @@ -99,6 +99,9 @@ type CreateRouteInput struct { // The ID of a network interface. NetworkInterfaceId *string + // The Amazon Resource Name (ARN) of the ODB network. + OdbNetworkArn *string + // The ID of a transit gateway. 
TransitGatewayId *string @@ -210,6 +213,36 @@ func (c *Client) addOperationCreateRouteMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServer.go index 29e46beed..f195cb9cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServer.go @@ -199,6 +199,36 @@ func (c *Client) addOperationCreateRouteServerMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerEndpoint.go index dd14f05e8..fad16c75d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerEndpoint.go @@ -165,6 +165,36 @@ func (c *Client) addOperationCreateRouteServerEndpointMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerPeer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerPeer.go index 5de7f3951..d6e4d7773 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerPeer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteServerPeer.go @@ -171,6 +171,36 @@ func (c *Client) addOperationCreateRouteServerPeerMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go index 5fa58ddda..5677b90f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go @@ -163,6 +163,36 @@ func (c *Client) addOperationCreateRouteTableMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go index 5c3c98472..b528d293b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go @@ -188,6 +188,36 @@ func (c *Client) addOperationCreateSecurityGroupMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go index 1e25c4f7c..c7c3167b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go @@ -218,7 +218,8 @@ type CreateSnapshotOutput struct { TransferType types.TransferType // The ID of the volume that was used to create the snapshot. Snapshots created by - // the CopySnapshotaction have an arbitrary volume ID that should not be used for any purpose. + // a copy snapshot operation have an arbitrary volume ID that you should not use + // for any purpose. VolumeId *string // The size of the volume, in GiB. 
@@ -318,6 +319,36 @@ func (c *Client) addOperationCreateSnapshotMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go index f6277b1ff..886377c5a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go @@ -69,7 +69,7 @@ type CreateSnapshotsInput struct { // - To create local snapshots in the same Local Zone as the source instance, // specify local . // - // - To create a regional snapshots in the parent Region of the Local Zone, + // - To create regional snapshots in the parent Region of the Local Zone, // specify regional or omit this parameter. 
// // Default value: regional @@ -195,6 +195,36 @@ func (c *Client) addOperationCreateSnapshotsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go index 082030ba0..c71451376 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go @@ -154,6 +154,36 @@ func (c *Client) addOperationCreateSpotDatafeedSubscriptionMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go index 355a02112..766b2a763 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go @@ -14,12 +14,12 @@ import ( // Stores an AMI as a single object in an Amazon S3 bucket. // // To use this API, you must have the required permissions. For more information, -// see [Permissions for storing and restoring AMIs using Amazon S3]in the Amazon EC2 User Guide. +// see [Permissions for storing and restoring AMIs using S3]in the Amazon EC2 User Guide. // -// For more information, see [Store and restore an AMI using Amazon S3] in the Amazon EC2 User Guide. 
+// For more information, see [Store and restore an AMI using S3] in the Amazon EC2 User Guide. // -// [Store and restore an AMI using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html -// [Permissions for storing and restoring AMIs using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions +// [Store and restore an AMI using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html +// [Permissions for storing and restoring AMIs using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/work-with-ami-store-restore.html#ami-s3-permissions func (c *Client) CreateStoreImageTask(ctx context.Context, params *CreateStoreImageTaskInput, optFns ...func(*Options)) (*CreateStoreImageTaskOutput, error) { if params == nil { params = &CreateStoreImageTaskInput{} @@ -161,6 +161,36 @@ func (c *Client) addOperationCreateStoreImageTaskMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go index 9ee93678d..1f3e4a53a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go @@ -220,6 +220,36 @@ func (c *Client) addOperationCreateSubnetMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go index 0b7898d09..8b233e480 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreateSubnetCidrReservationMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go index 5f0b00a71..d65db8c3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go @@ -160,6 +160,36 @@ func (c *Client) addOperationCreateTagsMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go index 8470f9e74..62de5fd7d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go @@ -164,6 +164,36 @@ func (c *Client) addOperationCreateTrafficMirrorFilterMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err 
!= nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go index 674a696de..526f24174 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go @@ -208,6 +208,36 @@ func (c *Client) addOperationCreateTrafficMirrorFilterRuleMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go index 57f827c89..39707a2b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go @@ -216,6 +216,36 @@ func (c *Client) addOperationCreateTrafficMirrorSessionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go index bb08112d3..e00f20951 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go @@ -177,6 +177,36 @@ func (c *Client) addOperationCreateTrafficMirrorTargetMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go index f0a85021b..084793f3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go @@ -161,6 +161,36 @@ func (c *Client) addOperationCreateTransitGatewayMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go index 3fa5ae971..c0c10a6fc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go @@ -156,6 +156,36 @@ func (c *Client) addOperationCreateTransitGatewayConnectMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go index 10c957d2b..321a915b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go @@ -177,6 +177,36 @@ func (c *Client) addOperationCreateTransitGatewayConnectPeerMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go index 7108a7350..0393af8ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go @@ -153,6 +153,36 @@ func (c *Client) addOperationCreateTransitGatewayMulticastDomainMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go index 9093d50c4..4ab5f584f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreateTransitGatewayPeeringAttachmentMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go index bbb3a4f58..9d4ce176f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go @@ -146,6 +146,36 @@ func (c *Client) addOperationCreateTransitGatewayPolicyTableMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go index b961ec8c3..7d61a6173 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go @@ -154,6 +154,36 @@ func (c *Client) addOperationCreateTransitGatewayPrefixListReferenceMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go index ecc17331a..df8b0179b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go @@ -154,6 +154,36 @@ func (c *Client) addOperationCreateTransitGatewayRouteMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go index 433367ff3..5a1e20dba 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go @@ -145,6 +145,36 @@ func (c *Client) addOperationCreateTransitGatewayRouteTableMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go index 838a336dd..cc50f71cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go @@ -150,6 +150,36 @@ func (c *Client) addOperationCreateTransitGatewayRouteTableAnnouncementMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go index 29cb317c7..1c74e745a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreateTransitGatewayVpcAttachmentMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go index 5125ae6c9..a03867e7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go @@ -204,6 +204,36 @@ func (c *Client) addOperationCreateVerifiedAccessEndpointMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go index 279065e7c..0c348f2e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreateVerifiedAccessGroupMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil 
{ + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go index f1d2a7ca6..6ea0177c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go @@ -158,6 +158,36 @@ func (c *Client) addOperationCreateVerifiedAccessInstanceMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go index 0098b27ac..7aec8ad46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go @@ -187,6 +187,36 @@ func (c *Client) addOperationCreateVerifiedAccessTrustProviderMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go index 81cd8e4de..8414b7178 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go @@ -52,9 +52,15 @@ type CreateVolumeInput struct { // The ID of the Availability Zone in which to create the volume. For example, // us-east-1a . // - // This member is required. + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. AvailabilityZone *string + // The ID of the Availability Zone in which to create the volume. For example, + // use1-az1 . + // + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. + AvailabilityZoneId *string + // Unique, case-sensitive identifier that you provide to ensure the idempotency of // the request. For more information, see [Ensure Idempotency]. // @@ -236,6 +242,9 @@ type CreateVolumeOutput struct { // The Availability Zone for the volume. AvailabilityZone *string + // The ID of the Availability Zone for the volume. + AvailabilityZoneId *string + // The time stamp when volume creation was initiated. CreateTime *time.Time @@ -373,9 +382,6 @@ func (c *Client) addOperationCreateVolumeMiddlewares(stack *middleware.Stack, op if err = addIdempotencyToken_opCreateVolumeMiddleware(stack, options); err != nil { return err } - if err = addOpCreateVolumeValidationMiddleware(stack); err != nil { - return err - } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateVolume(options.Region), middleware.Before); err != nil { return err } @@ -394,6 +400,36 @@ func (c *Client) addOperationCreateVolumeMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go index c346a24cb..856cc51ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go @@ -223,6 +223,36 @@ func (c *Client) addOperationCreateVpcMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go index e9484cd77..5a05771de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go @@ -168,6 +168,36 @@ func (c *Client) addOperationCreateVpcBlockPublicAccessExclusionMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go index d03d9ae4a..704ba0321 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go @@ -219,6 +219,36 @@ func (c *Client) addOperationCreateVpcEndpointMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go index 0d795a017..719abc089 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go @@ -171,6 +171,36 @@ func (c *Client) addOperationCreateVpcEndpointConnectionNotificationMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go index 27caf777f..e3c9ca196 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go @@ -185,6 +185,36 @@ func (c *Client) addOperationCreateVpcEndpointServiceConfigurationMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go index cab19a1f6..95ed6d164 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go @@ -176,6 +176,36 @@ func (c *Client) 
addOperationCreateVpcPeeringConnectionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go index 1661a4572..0d8f88b07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go @@ -69,6 +69,11 @@ type CreateVpnConnectionInput struct { // The options for the VPN connection. Options *types.VpnConnectionOptionsSpecification + // Specifies the storage mode for the pre-shared key (PSK). Valid values are + // Standard " (stored in the Site-to-Site VPN service) or SecretsManager (stored + // in Amazon Web Services Secrets Manager). + PreSharedKeyStorage *string + // The tags to apply to the VPN connection. 
TagSpecifications []types.TagSpecification @@ -183,6 +188,36 @@ func (c *Client) addOperationCreateVpnConnectionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go index 17db95b79..d665c3cdc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go @@ -145,6 +145,36 @@ func (c *Client) addOperationCreateVpnConnectionRouteMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go index a5951f0a0..5383c482e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go @@ -164,6 +164,36 @@ func (c *Client) addOperationCreateVpnGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go index 3a51bb375..cbe86f3b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteCarrierGatewayMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go index 7c1f54322..6518c5177 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go @@ -143,6 +143,36 @@ func (c *Client) addOperationDeleteClientVpnEndpointMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go index 0568afee7..83d5916ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDeleteClientVpnRouteMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go index 674d27637..fa01de81c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDeleteCoipCidrMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go index 6c4e01453..5e25962bb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteCoipPoolMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go index 99055b42c..22a94d9ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go @@ -139,6 +139,36 @@ func (c *Client) addOperationDeleteCustomerGatewayMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go index 6268c2344..e7c54fe90 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go @@ -140,6 +140,36 @@ func (c *Client) addOperationDeleteDhcpOptionsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go index fba1d447d..9d352b7dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteEgressOnlyInternetGatewayMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go index 127a24e89..725b09d29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go @@ -192,6 +192,36 @@ func (c *Client) addOperationDeleteFleetsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go index 13c315621..8fe620820 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go 
@@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteFlowLogsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go index 2881935a2..5a308fb19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteFpgaImageMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteImageUsageReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteImageUsageReport.go new file mode 100644 index 000000000..1cc151a0b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteImageUsageReport.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified image usage report. +// +// For more information, see [View your AMI usage] in the Amazon EC2 User Guide. 
+// +// [View your AMI usage]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/your-ec2-ami-usage.html +func (c *Client) DeleteImageUsageReport(ctx context.Context, params *DeleteImageUsageReportInput, optFns ...func(*Options)) (*DeleteImageUsageReportOutput, error) { + if params == nil { + params = &DeleteImageUsageReportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteImageUsageReport", params, optFns, c.addOperationDeleteImageUsageReportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteImageUsageReportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteImageUsageReportInput struct { + + // The ID of the report to delete. + // + // This member is required. + ReportId *string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + noSmithyDocumentSerde +} + +type DeleteImageUsageReportOutput struct { + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteImageUsageReportMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDeleteImageUsageReport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDeleteImageUsageReport{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteImageUsageReport"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteImageUsageReportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImageUsageReport(options.Region), 
middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteImageUsageReport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteImageUsageReport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go index 72acbf30a..f89425d14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteInstanceConnectEndpointMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go 
index fd33b60e5..4a29b62ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDeleteInstanceEventWindowMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go index 5f31ceb9d..742766601 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go @@ -138,6 +138,36 @@ func (c *Client) addOperationDeleteInternetGatewayMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go index 3871ff155..0af7f2286 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go @@ -168,6 +168,36 @@ func (c *Client) addOperationDeleteIpamMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go index acc901e9f..f930c2ecd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDeleteIpamExternalResourceVerificationTokenMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go index 5f54a9f20..67f7a2137 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go @@ -160,6 +160,36 @@ func (c *Client) addOperationDeleteIpamPoolMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go index 2b1ad0006..fef64a964 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteIpamResourceDiscoveryMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go index 4f0c79ab2..8c1052c16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDeleteIpamScopeMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go index e8e64e0a1..8afdc8fd6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteKeyPairMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go index 7d51783a6..1d22d5eeb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDeleteLaunchTemplateMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go index 07b579ecb..9a51414e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go @@ -171,6 +171,36 @@ func (c *Client) addOperationDeleteLaunchTemplateVersionsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go index e8fb5add0..e0992ae4c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDeleteLocalGatewayRouteMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err 
!= nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go index 95c9a6a7d..d746cd4e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go index 0418ae14c..84d912416 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableVirtualInterfaceGroupAs if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go index 
b64ff1d3f..96ce340cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableVpcAssociationMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterface.go index f74f6b5cd..c96da4f29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterface.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteLocalGatewayVirtualInterfaceMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterfaceGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterfaceGroup.go index 134948014..e0e02d53b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterfaceGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayVirtualInterfaceGroup.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteLocalGatewayVirtualInterfaceGroupMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, 
options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go index b06035076..3e3492809 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go @@ -143,6 +143,36 @@ func (c *Client) addOperationDeleteManagedPrefixListMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go index be0b89790..a1a550040 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteNatGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go index a73d44b09..2b3118871 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go @@ -138,6 +138,36 @@ func (c *Client) addOperationDeleteNetworkAclMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go index e2818bf9a..69e042a46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteNetworkAclEntryMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go index d8de77338..dd72bfa4c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteNetworkInsightsAccessScopeMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go index 4ca144ef1..a33bb526c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteNetworkInsightsAccessScopeAnalysisMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err 
!= nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go index 09f89e155..69aaf7729 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteNetworkInsightsAnalysisMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go index a041f51ac..4b04282d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteNetworkInsightsPathMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go index a6b72b5ad..f41a740ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go @@ -139,6 +139,36 @@ func (c *Client) addOperationDeleteNetworkInterfaceMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go index 18942c811..a930b891c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDeleteNetworkInterfacePermissionMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go index 265e1078e..140640f35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeletePlacementGroupMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + 
if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go index 0932f9685..08dbb0532 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDeletePublicIpv4PoolMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go index 80704b3a1..82a9a384b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go @@ -145,6 +145,36 @@ func (c *Client) addOperationDeleteQueuedReservedInstancesMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil 
{ + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go index 46460e9ec..616906f04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteRouteMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServer.go index 5d858f4c5..60a5943f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServer.go @@ -166,6 +166,36 @@ func (c *Client) addOperationDeleteRouteServerMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerEndpoint.go index a5f1a0fca..a02a80bad 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerEndpoint.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteRouteServerEndpointMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerPeer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerPeer.go index e8cb7ad8d..3bbfc30e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerPeer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteServerPeer.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDeleteRouteServerPeerMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go index 81227066a..c8fff2a8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go @@ -138,6 +138,36 @@ func (c *Client) addOperationDeleteRouteTableMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go index 4422b1543..fd71d6c56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteSecurityGroupMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go index f60ed99a0..d3b230620 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDeleteSnapshotMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = 
addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go index fc1fef6f5..4a08dc5bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go @@ -130,6 +130,36 @@ func (c *Client) addOperationDeleteSpotDatafeedSubscriptionMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go index 192472bb5..c8f8b40cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go @@ -138,6 +138,36 @@ func (c *Client) addOperationDeleteSubnetMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go index 928b556d9..e1a8673c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteSubnetCidrReservationMiddlewares(stack *middl if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go index b08c304a6..15795706d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go @@ -158,6 +158,36 @@ func (c *Client) addOperationDeleteTagsMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go index 1244ed6ec..4022c4dbe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteTrafficMirrorFilterMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go index 37ba2eee5..b2d2ab7e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteTrafficMirrorFilterRuleMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go index 5da049274..3e755bace 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteTrafficMirrorSessionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go index 8d989afe7..1daf4544b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteTrafficMirrorTargetMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go index 1dbab0f91..187fe91b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go index eb5e51873..a137f4890 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go @@ -143,6 +143,36 @@ func (c *Client) addOperationDeleteTransitGatewayConnectMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go index 69bb9acec..f191da3ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayConnectPeerMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go index 4c9f83491..255e04030 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayMulticastDomainMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if 
err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go index 7b93ab1fd..103ae92f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayPeeringAttachmentMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go index 787d20fbc..6c32d67de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayPolicyTableMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go index 1757471ad..b63d2f895 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDeleteTransitGatewayPrefixListReferenceMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go index 97c0b6355..58ce22bd2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDeleteTransitGatewayRouteMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go index 65a48c4a3..ed29e8078 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go @@ -145,6 +145,36 @@ func (c *Client) addOperationDeleteTransitGatewayRouteTableMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go index 330f164c0..065f3606e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayRouteTableAnnouncementMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go index 0e7cfc290..88c4370cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteTransitGatewayVpcAttachmentMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); 
err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go index 071a31738..14a8ad9a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDeleteVerifiedAccessEndpointMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go index 2e159865d..f0ba0e063 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDeleteVerifiedAccessGroupMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go index b60833172..c132bf869 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDeleteVerifiedAccessInstanceMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go index 804127497..495615f38 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDeleteVerifiedAccessTrustProviderMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go index e6261caeb..1d3380d10 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeleteVolumeMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go index 626b1d095..41e3dbbfe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDeleteVpcMiddlewares(stack *middleware.Stack, optio if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go index e2b0a57e7..037f8053f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDeleteVpcBlockPublicAccessExclusionMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go index 608eba0bc..e4142f24b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go @@ -142,6 +142,36 @@ func (c *Client) addOperationDeleteVpcEndpointConnectionNotificationsMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go index 0edda6b47..d18403b95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDeleteVpcEndpointServiceConfigurationsMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go index dffbb307c..6a231798b 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDeleteVpcEndpointsMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go index 197221c62..5f526b920 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go @@ -145,6 +145,36 @@ func (c *Client) addOperationDeleteVpcPeeringConnectionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go index 27040b521..3a4e18e19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDeleteVpnConnectionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + 
} + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go index 2748ce0bf..de908788e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go @@ -140,6 +140,36 @@ func (c *Client) addOperationDeleteVpnConnectionRouteMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go index c09165901..a8674158b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDeleteVpnGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return 
err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go index 0f146b8cd..fa04d5b14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go @@ -15,8 +15,8 @@ import ( // Amazon Web Services resources through bring your own IP addresses (BYOIP) and // deletes the corresponding address pool. // -// Before you can release an address range, you must stop advertising it using WithdrawByoipCidr -// and you must not have any IP addresses allocated from its address range. +// Before you can release an address range, you must stop advertising it and you +// must not have any IP addresses allocated from its address range. func (c *Client) DeprovisionByoipCidr(ctx context.Context, params *DeprovisionByoipCidrInput, optFns ...func(*Options)) (*DeprovisionByoipCidrOutput, error) { if params == nil { params = &DeprovisionByoipCidrInput{} @@ -148,6 +148,36 @@ func (c *Client) addOperationDeprovisionByoipCidrMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go index bbef3e41d..07bbfa8a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDeprovisionIpamByoasnMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go index 930565b74..2ea2bdcff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDeprovisionIpamPoolCidrMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go index 2170f92d2..adeefedf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go @@ -28,10 +28,7 @@ func (c *Client) DeprovisionPublicIpv4PoolCidr(ctx context.Context, params *Depr type DeprovisionPublicIpv4PoolCidrInput struct { - // The CIDR you want to deprovision from the pool. Enter the CIDR you want to - // deprovision with a netmask of /32 . You must rerun this command for each IP - // address in the CIDR range. If your CIDR is a /24 , you will have to run this - // command to deprovision each of the 256 IP addresses in the /24 CIDR. + // The CIDR you want to deprovision from the pool. // // This member is required. 
Cidr *string @@ -152,6 +149,36 @@ func (c *Client) addOperationDeprovisionPublicIpv4PoolCidrMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go index e2030693d..d73559453 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -17,16 +18,20 @@ import ( // to the Recycle Bin for the specified retention period. It can be restored before // its retention period expires, after which it is permanently deleted. If the // deregistered AMI doesn't match a retention rule, it is permanently deleted -// immediately. For more information, see [Recycle Bin]in the Amazon EBS User Guide. +// immediately. For more information, see [Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin]in the Amazon EBS User Guide. +// +// When deregistering an EBS-backed AMI, you can optionally delete its associated +// snapshots at the same time. However, if a snapshot is associated with multiple +// AMIs, it won't be deleted even if specified for deletion, although the AMI will +// still be deregistered. // // Deregistering an AMI does not delete the following: // // - Instances already launched from the AMI. You'll continue to incur usage // costs for the instances until you terminate them. // -// - For EBS-backed AMIs: The snapshots that were created of the root and data -// volumes of the instance during AMI creation. You'll continue to incur snapshot -// storage costs. +// - For EBS-backed AMIs: Snapshots that are associated with multiple AMIs. +// You'll continue to incur snapshot storage costs. // // - For instance store-backed AMIs: The files uploaded to Amazon S3 during AMI // creation. You'll continue to incur S3 storage costs. @@ -34,7 +39,7 @@ import ( // For more information, see [Deregister an Amazon EC2 AMI] in the Amazon EC2 User Guide. 
// // [Deregister an Amazon EC2 AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html -// [Recycle Bin]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html +// [Recover deleted Amazon EBS snapshots and EBS-backed AMIs with Recycle Bin]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html func (c *Client) DeregisterImage(ctx context.Context, params *DeregisterImageInput, optFns ...func(*Options)) (*DeregisterImageOutput, error) { if params == nil { params = &DeregisterImageInput{} @@ -58,6 +63,15 @@ type DeregisterImageInput struct { // This member is required. ImageId *string + // Specifies whether to delete the snapshots associated with the AMI during + // deregistration. + // + // If a snapshot is associated with multiple AMIs, it is not deleted, regardless + // of this setting. + // + // Default: The snapshots are not deleted. + DeleteAssociatedSnapshots *bool + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . Otherwise, it is @@ -68,6 +82,14 @@ type DeregisterImageInput struct { } type DeregisterImageOutput struct { + + // The deletion result for each snapshot associated with the AMI, including the + // snapshot ID and its success or error code. + DeleteSnapshotResults []types.DeleteSnapshotReturnCode + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -162,6 +184,36 @@ func (c *Client) addOperationDeregisterImageMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go index 398d0d65b..f17602447 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go @@ -143,6 +143,36 @@ func (c *Client) addOperationDeregisterInstanceEventNotificationAttributesMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } 
+ if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go index 7fb4bcf17..c24db20cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeregisterTransitGatewayMulticastGroupMembersMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go index 6430fb4c2..df28a2c04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDeregisterTransitGatewayMulticastGroupSourcesMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != 
nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go index c8ab04279..ea014f7a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go @@ -161,6 +161,36 @@ func (c *Client) addOperationDescribeAccountAttributesMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go index fe741ed55..97f510391 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go @@ -160,6 +160,36 @@ func (c *Client) addOperationDescribeAddressTransfersMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go index 83a3a8f6c..f2b73a0bd 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go @@ -173,6 +173,36 @@ func (c *Client) addOperationDescribeAddressesMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go index a9d046804..0f1f76e99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDescribeAddressesAttributeMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go index 3db068c43..5fb9987a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDescribeAggregateIdFormatMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != 
nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go index 24a09a759..09072bfd5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go @@ -196,6 +196,36 @@ func (c *Client) addOperationDescribeAvailabilityZonesMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go index b5fda910f..371fa406b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDescribeAwsNetworkPerformanceMetricSubscriptionsMid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go index 96cd8f1d4..3cd37a1d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go @@ -176,6 +176,36 @@ func (c *Client) addOperationDescribeBundleTasksMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go index 4fb42b255..772480fac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go @@ -11,10 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes the IP address ranges that were specified in calls to ProvisionByoipCidr. -// -// To describe the address pools that were created when you provisioned the -// address ranges, use DescribePublicIpv4Poolsor DescribeIpv6Pools. +// Describes the IP address ranges that were provisioned for use with Amazon Web +// Services resources through bring your own IP addresses (BYOIP).
func (c *Client) DescribeByoipCidrs(ctx context.Context, params *DescribeByoipCidrsInput, optFns ...func(*Options)) (*DescribeByoipCidrsOutput, error) { if params == nil { params = &DescribeByoipCidrsInput{} @@ -153,6 +151,36 @@ func (c *Client) addOperationDescribeByoipCidrsMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go index 587de5caf..f42daa494 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go @@ -170,6 +170,36 @@ func (c *Client) addOperationDescribeCapacityBlockExtensionHistoryMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go index e62032385..016853c07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeCapacityBlockExtensionOfferingsMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + 
return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go index 1debff056..163420a1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go @@ -13,8 +13,8 @@ import ( ) // Describes Capacity Block offerings available for purchase in the Amazon Web -// Services Region that you're currently using. With Capacity Blocks, you purchase -// a specific instance type for a period of time. +// Services Region that you're currently using. With Capacity Blocks, you can +// purchase a specific GPU instance type or EC2 UltraServer for a period of time. // // To search for an available Capacity Block offering, you specify a reservation // duration and instance count. @@ -71,6 +71,12 @@ type DescribeCapacityBlockOfferingsInput struct { // The earliest start date for the Capacity Block offering. StartDateRange *time.Time + // The number of EC2 UltraServers in the offerings. + UltraserverCount *int32 + + // The EC2 UltraServer type of the Capacity Block offerings. 
+ UltraserverType *string + noSmithyDocumentSerde } @@ -177,6 +183,36 @@ func (c *Client) addOperationDescribeCapacityBlockOfferingsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockStatus.go new file mode 100644 index 000000000..3c35d897b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockStatus.go @@ -0,0 +1,312 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes the availability of capacity for the specified Capacity blocks, or +// all of your Capacity Blocks. +func (c *Client) DescribeCapacityBlockStatus(ctx context.Context, params *DescribeCapacityBlockStatusInput, optFns ...func(*Options)) (*DescribeCapacityBlockStatusOutput, error) { + if params == nil { + params = &DescribeCapacityBlockStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCapacityBlockStatus", params, optFns, c.addOperationDescribeCapacityBlockStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCapacityBlockStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCapacityBlockStatusInput struct { + + // The ID of the Capacity Block. + CapacityBlockIds []string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // One or more filters. + // + // - interconnect-status - The status of the interconnect for the Capacity Block ( + // ok | impaired | insufficient-data ). + Filters []types.Filter + + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + MaxResults *int32 + + // The token to use to retrieve the next page of results. 
+ NextToken *string + + noSmithyDocumentSerde +} + +type DescribeCapacityBlockStatusOutput struct { + + // The availability of capacity for a Capacity Block. + CapacityBlockStatuses []types.CapacityBlockStatus + + // The token to use to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeCapacityBlockStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeCapacityBlockStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeCapacityBlockStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeCapacityBlockStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityBlockStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeCapacityBlockStatusPaginatorOptions is the paginator options for +// DescribeCapacityBlockStatus +type DescribeCapacityBlockStatusPaginatorOptions struct { + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeCapacityBlockStatusPaginator is a paginator for +// DescribeCapacityBlockStatus +type DescribeCapacityBlockStatusPaginator struct { + options DescribeCapacityBlockStatusPaginatorOptions + client DescribeCapacityBlockStatusAPIClient + params *DescribeCapacityBlockStatusInput + nextToken *string + firstPage bool +} + +// NewDescribeCapacityBlockStatusPaginator returns a new +// DescribeCapacityBlockStatusPaginator +func NewDescribeCapacityBlockStatusPaginator(client DescribeCapacityBlockStatusAPIClient, params *DescribeCapacityBlockStatusInput, optFns ...func(*DescribeCapacityBlockStatusPaginatorOptions)) *DescribeCapacityBlockStatusPaginator { + if params == nil { + params = &DescribeCapacityBlockStatusInput{} + } + + options := DescribeCapacityBlockStatusPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeCapacityBlockStatusPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeCapacityBlockStatusPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeCapacityBlockStatus page. +func (p *DescribeCapacityBlockStatusPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeCapacityBlockStatusOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeCapacityBlockStatus(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeCapacityBlockStatusAPIClient is a client that implements the +// DescribeCapacityBlockStatus operation. +type DescribeCapacityBlockStatusAPIClient interface { + DescribeCapacityBlockStatus(context.Context, *DescribeCapacityBlockStatusInput, ...func(*Options)) (*DescribeCapacityBlockStatusOutput, error) +} + +var _ DescribeCapacityBlockStatusAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeCapacityBlockStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeCapacityBlockStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlocks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlocks.go new file mode 100644 index 000000000..90e7e328d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlocks.go @@ -0,0 +1,326 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes details about Capacity Blocks in the Amazon Web Services Region that +// you're currently using. +func (c *Client) DescribeCapacityBlocks(ctx context.Context, params *DescribeCapacityBlocksInput, optFns ...func(*Options)) (*DescribeCapacityBlocksOutput, error) { + if params == nil { + params = &DescribeCapacityBlocksInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCapacityBlocks", params, optFns, c.addOperationDescribeCapacityBlocksMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCapacityBlocksOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCapacityBlocksInput struct { + + // The IDs of the Capacity Blocks. + CapacityBlockIds []string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // One or more filters. + // + // - capacity-block-id - The ID of the Capacity Block. + // + // - ultraserver-type - The Capacity Block type. The type can be instances or + // ultraservers . + // + // - availability-zone - The Availability Zone of the Capacity Block. + // + // - start-date - The date and time at which the Capacity Block was started. + // + // - end-date - The date and time at which the Capacity Block expires. When a + // Capacity Block expires, all instances in the Capacity Block are terminated. + // + // - create-date - The date and time at which the Capacity Block was created. + // + // - state - The state of the Capacity Block ( active | expired | unavailable | + // cancelled | failed | scheduled | payment-pending | payment-failed ). + // + // - tags - The tags assigned to the Capacity Block. + Filters []types.Filter + + // The maximum number of items to return for this request. 
To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + MaxResults *int32 + + // The token to use to retrieve the next page of results. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeCapacityBlocksOutput struct { + + // The Capacity Blocks. + CapacityBlocks []types.CapacityBlock + + // The token to use to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeCapacityBlocksMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeCapacityBlocks{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeCapacityBlocks{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeCapacityBlocks"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityBlocks(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeCapacityBlocksPaginatorOptions is the paginator options for +// DescribeCapacityBlocks +type DescribeCapacityBlocksPaginatorOptions struct { + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeCapacityBlocksPaginator is a paginator for DescribeCapacityBlocks +type DescribeCapacityBlocksPaginator struct { + options DescribeCapacityBlocksPaginatorOptions + client DescribeCapacityBlocksAPIClient + params *DescribeCapacityBlocksInput + nextToken *string + firstPage bool +} + +// NewDescribeCapacityBlocksPaginator returns a new DescribeCapacityBlocksPaginator +func NewDescribeCapacityBlocksPaginator(client DescribeCapacityBlocksAPIClient, params *DescribeCapacityBlocksInput, optFns ...func(*DescribeCapacityBlocksPaginatorOptions)) *DescribeCapacityBlocksPaginator { + if params == nil { + params = &DescribeCapacityBlocksInput{} + } + + options := DescribeCapacityBlocksPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeCapacityBlocksPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeCapacityBlocksPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeCapacityBlocks page. +func (p *DescribeCapacityBlocksPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeCapacityBlocksOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeCapacityBlocks(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeCapacityBlocksAPIClient is a client that implements the +// DescribeCapacityBlocks operation. +type DescribeCapacityBlocksAPIClient interface { + DescribeCapacityBlocks(context.Context, *DescribeCapacityBlocksInput, ...func(*Options)) (*DescribeCapacityBlocksOutput, error) +} + +var _ DescribeCapacityBlocksAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeCapacityBlocks(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeCapacityBlocks", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go index 4e4bff5ac..d7d3bf392 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go @@ -183,6 +183,36 @@ func (c *Client) addOperationDescribeCapacityReservationBillingRequestsMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go index 1ed6c8482..23014717d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeCapacityReservationFleetsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go index b07e2047a..3a219ca9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go @@ -231,6 +231,36 @@ func (c *Client) addOperationDescribeCapacityReservationsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go index 2adcf8c45..2605115f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go @@ -169,6 +169,36 @@ func (c *Client) addOperationDescribeCarrierGatewaysMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go index 6d7aa7170..63c81b036 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go @@ -176,6 +176,36 @@ func (c *Client) addOperationDescribeClassicLinkInstancesMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go index 65e292c7b..9c9c9be6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeClientVpnAuthorizationRulesMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go index f6029c655..e66c94438 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeClientVpnConnectionsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go index 7b41afd5e..7efc96880 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go @@ -156,6 +156,36 @@ func (c *Client) addOperationDescribeClientVpnEndpointsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go index 5216d44d4..f502481c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go @@ -164,6 +164,36 @@ func (c *Client) addOperationDescribeClientVpnRoutesMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go index 4b586cfbb..f42c1bac1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go @@ -166,6 +166,36 @@ func (c *Client) addOperationDescribeClientVpnTargetNetworksMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go index 6da91482e..b92ed8203 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go @@ -157,6 +157,36 @@ func (c *Client) addOperationDescribeCoipPoolsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go index 17d0d5a55..1197d246b 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDescribeConversionTasksMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go index af39ca3cf..d7736e95b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go @@ -174,6 +174,36 @@ func (c *Client) addOperationDescribeCustomerGatewaysMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go index f17fce607..403150759 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go @@ -164,6 +164,36 @@ func (c *Client) addOperationDescribeDeclarativePoliciesReportsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go index 5d9cd01c1..041e2bcda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go @@ -179,6 +179,36 @@ func (c *Client) addOperationDescribeDhcpOptionsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go index 1e3a4d09b..4a0f44e6c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go @@ -166,6 +166,36 @@ func (c *Client) addOperationDescribeEgressOnlyInternetGatewaysMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go index 1daa57499..3e1dab48a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go @@ -174,6 +174,36 @@ func (c *Client) addOperationDescribeElasticGpusMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go index 0fb2842c4..1c5ca32fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDescribeExportImageTasksMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go index 4ce00435f..801e77557 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go @@ -138,6 +138,36 @@ 
func (c *Client) addOperationDescribeExportTasksMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go index 0494a6d4b..a4053d28a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeFastLaunchImagesMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go index 77ff4fbd1..edb5efa95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeFastSnapshotRestoresMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err 
!= nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go index 7c6f12838..91650e377 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go @@ -188,6 +188,36 @@ func (c *Client) addOperationDescribeFleetHistoryMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go index a0febff4d..6da742825 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go @@ -173,6 +173,36 @@ func (c *Client) addOperationDescribeFleetInstancesMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go index a32d0218c..71dcf22b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go @@ -180,6 +180,36 @@ func (c *Client) addOperationDescribeFleetsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go index d4dfabd3a..5c786863f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go @@ -182,6 +182,36 @@ func (c *Client) addOperationDescribeFlowLogsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go index 5118f81c4..0b7a1c13c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDescribeFpgaImageAttributeMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + 
return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go index 0b4a9ae2c..8d393c061 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go @@ -184,6 +184,36 @@ func (c *Client) addOperationDescribeFpgaImagesMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go index 618f462db..4537d270f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go @@ -175,6 +175,36 @@ func (c *Client) addOperationDescribeHostReservationOfferingsMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go index 691768d3a..35edb7e3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeHostReservationsMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go index 07758c6fe..616ca606f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go @@ -172,6 +172,36 @@ func (c *Client) addOperationDescribeHostsMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go index 1a05ef069..38226d1aa 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDescribeIamInstanceProfileAssociationsMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go index 58dbb612f..b69641271 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go @@ -157,6 +157,36 @@ func (c *Client) addOperationDescribeIdFormatMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go index 4753f2350..5e984d626 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeIdentityIdFormatMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != 
nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go index c8c98eb35..1864126a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go @@ -39,7 +39,7 @@ type DescribeImageAttributeInput struct { // // Note: The blockDeviceMapping attribute is deprecated. Using this attribute // returns the Client.AuthFailure error. To get information about the block device - // mappings for an AMI, use the DescribeImagesaction. + // mappings for an AMI, describe the image instead. // // This member is required. Attribute types.ImageAttributeName @@ -115,9 +115,9 @@ type DescribeImageAttributeOutput struct { // Base64 representation of the non-volatile UEFI variable store. To retrieve the // UEFI data, use the [GetInstanceUefiData]command. You can inspect and modify the UEFI data by using - // the [python-uefivars tool]on GitHub. For more information, see [UEFI Secure Boot] in the Amazon EC2 User Guide. + // the [python-uefivars tool]on GitHub. For more information, see [UEFI Secure Boot for Amazon EC2 instances] in the Amazon EC2 User Guide. 
// - // [UEFI Secure Boot]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html + // [UEFI Secure Boot for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html // [GetInstanceUefiData]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceUefiData // [python-uefivars tool]: https://github.com/awslabs/python-uefivars UefiData *types.AttributeValue @@ -216,6 +216,36 @@ func (c *Client) addOperationDescribeImageAttributeMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go new file mode 100644 index 000000000..014f031c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageReferences.go @@ -0,0 +1,334 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes your Amazon Web Services resources that are referencing the specified +// images. +// +// For more information, see [Identify your resources referencing specified AMIs] in the Amazon EC2 User Guide. +// +// [Identify your resources referencing specified AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-ami-references.html +func (c *Client) DescribeImageReferences(ctx context.Context, params *DescribeImageReferencesInput, optFns ...func(*Options)) (*DescribeImageReferencesOutput, error) { + if params == nil { + params = &DescribeImageReferencesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageReferences", params, optFns, c.addOperationDescribeImageReferencesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageReferencesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageReferencesInput struct { + + // The IDs of the images to check for resource references. + // + // This member is required. + ImageIds []string + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . 
+ DryRun *bool + + // Specifies whether to check all supported Amazon Web Services resource types for + // image references. When specified, default values are applied for + // ResourceTypeOptions . For the default values, see [How AMI reference checks work] in the Amazon EC2 User + // Guide. If you also specify ResourceTypes with ResourceTypeOptions , your + // specified values override the default values. + // + // Supported resource types: ec2:Instance | ec2:LaunchTemplate | ssm:Parameter | + // imagebuilder:ImageRecipe | imagebuilder:ContainerRecipe + // + // Either IncludeAllResourceTypes or ResourceTypes must be specified. + // + // [How AMI reference checks work]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-ami-references-works.html + IncludeAllResourceTypes *bool + + // The maximum number of items to return for this request. To get the next page + // of items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + MaxResults *int32 + + // The token returned from a previous paginated request. Pagination continues from + // the end of the items returned by the previous request. + NextToken *string + + // The Amazon Web Services resource types to check for image references. + // + // Either IncludeAllResourceTypes or ResourceTypes must be specified. + ResourceTypes []types.ResourceTypeRequest + + noSmithyDocumentSerde +} + +type DescribeImageReferencesOutput struct { + + // The resources that are referencing the specified images. + ImageReferences []types.ImageReference + + // The token to include in another request to get the next page of items. This + // value is null when there are no more items to return. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageReferencesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeImageReferences{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeImageReferences{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageReferences"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDescribeImageReferencesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageReferences(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err 
!= nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeImageReferencesPaginatorOptions is the paginator options for +// DescribeImageReferences +type DescribeImageReferencesPaginatorOptions struct { + // The maximum number of items to return for this request. To get the next page + // of items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeImageReferencesPaginator is a paginator for DescribeImageReferences +type DescribeImageReferencesPaginator struct { + options DescribeImageReferencesPaginatorOptions + client DescribeImageReferencesAPIClient + params *DescribeImageReferencesInput + nextToken *string + firstPage bool +} + +// NewDescribeImageReferencesPaginator returns a new +// DescribeImageReferencesPaginator +func NewDescribeImageReferencesPaginator(client DescribeImageReferencesAPIClient, params *DescribeImageReferencesInput, optFns ...func(*DescribeImageReferencesPaginatorOptions)) *DescribeImageReferencesPaginator { + if params == nil { + params = &DescribeImageReferencesInput{} + } + + options := DescribeImageReferencesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeImageReferencesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeImageReferencesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeImageReferences page. +func (p *DescribeImageReferencesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageReferencesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeImageReferences(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeImageReferencesAPIClient is a client that implements the +// DescribeImageReferences operation. 
+type DescribeImageReferencesAPIClient interface { + DescribeImageReferences(context.Context, *DescribeImageReferencesInput, ...func(*Options)) (*DescribeImageReferencesOutput, error) +} + +var _ DescribeImageReferencesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeImageReferences(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeImageReferences", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReportEntries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReportEntries.go new file mode 100644 index 000000000..56276b739 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReportEntries.go @@ -0,0 +1,327 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes the entries in image usage reports, showing how your images are used +// across other Amazon Web Services accounts. +// +// For more information, see [View your AMI usage] in the Amazon EC2 User Guide. +// +// [View your AMI usage]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/your-ec2-ami-usage.html +func (c *Client) DescribeImageUsageReportEntries(ctx context.Context, params *DescribeImageUsageReportEntriesInput, optFns ...func(*Options)) (*DescribeImageUsageReportEntriesOutput, error) { + if params == nil { + params = &DescribeImageUsageReportEntriesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageUsageReportEntries", params, optFns, c.addOperationDescribeImageUsageReportEntriesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageUsageReportEntriesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageUsageReportEntriesInput struct { + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // The filters. + // + // - account-id - A 12-digit Amazon Web Services account ID. + // + // - creation-time - The time when the report was created, in the ISO 8601 format + // in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, + // 2025-11-29T11:04:43.305Z . You can use a wildcard ( * ), for example, + // 2025-11-29T* , which matches an entire day. + // + // - resource-type - The resource type ( ec2:Instance | ec2:LaunchTemplate ). + Filters []types.Filter + + // The IDs of the images for filtering the report entries. If specified, only + // report entries containing these images are returned. + ImageIds []string + + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + MaxResults *int32 + + // The token returned from a previous paginated request. 
Pagination continues from + // the end of the items returned by the previous request. + NextToken *string + + // The IDs of the usage reports. + ReportIds []string + + noSmithyDocumentSerde +} + +type DescribeImageUsageReportEntriesOutput struct { + + // The content of the usage reports. + ImageUsageReportEntries []types.ImageUsageReportEntry + + // The token to include in another request to get the next page of items. This + // value is null when there are no more items to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageUsageReportEntriesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeImageUsageReportEntries{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeImageUsageReportEntries{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageUsageReportEntries"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageUsageReportEntries(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeImageUsageReportEntriesPaginatorOptions is the paginator options for +// DescribeImageUsageReportEntries +type DescribeImageUsageReportEntriesPaginatorOptions struct { + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeImageUsageReportEntriesPaginator is a paginator for +// DescribeImageUsageReportEntries +type DescribeImageUsageReportEntriesPaginator struct { + options DescribeImageUsageReportEntriesPaginatorOptions + client DescribeImageUsageReportEntriesAPIClient + params *DescribeImageUsageReportEntriesInput + nextToken *string + firstPage bool +} + +// NewDescribeImageUsageReportEntriesPaginator returns a new +// DescribeImageUsageReportEntriesPaginator +func NewDescribeImageUsageReportEntriesPaginator(client DescribeImageUsageReportEntriesAPIClient, params *DescribeImageUsageReportEntriesInput, optFns ...func(*DescribeImageUsageReportEntriesPaginatorOptions)) *DescribeImageUsageReportEntriesPaginator { + if params == nil { + params = &DescribeImageUsageReportEntriesInput{} + } + + options := DescribeImageUsageReportEntriesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeImageUsageReportEntriesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeImageUsageReportEntriesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeImageUsageReportEntries page. +func (p *DescribeImageUsageReportEntriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageUsageReportEntriesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeImageUsageReportEntries(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeImageUsageReportEntriesAPIClient is a client that implements the +// DescribeImageUsageReportEntries operation. +type DescribeImageUsageReportEntriesAPIClient interface { + DescribeImageUsageReportEntries(context.Context, *DescribeImageUsageReportEntriesInput, ...func(*Options)) (*DescribeImageUsageReportEntriesOutput, error) +} + +var _ DescribeImageUsageReportEntriesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeImageUsageReportEntries(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeImageUsageReportEntries", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReports.go new file mode 100644 index 000000000..190c7dc9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageUsageReports.go @@ -0,0 +1,541 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "time" +) + +// Describes the configuration and status of image usage reports, filtered by +// report IDs or image IDs. +// +// For more information, see [View your AMI usage] in the Amazon EC2 User Guide. +// +// [View your AMI usage]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/your-ec2-ami-usage.html +func (c *Client) DescribeImageUsageReports(ctx context.Context, params *DescribeImageUsageReportsInput, optFns ...func(*Options)) (*DescribeImageUsageReportsOutput, error) { + if params == nil { + params = &DescribeImageUsageReportsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeImageUsageReports", params, optFns, c.addOperationDescribeImageUsageReportsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeImageUsageReportsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeImageUsageReportsInput struct { + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // The filters. + // + // - creation-time - The time when the report was created, in the ISO 8601 format + // in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, + // 2025-11-29T11:04:43.305Z . You can use a wildcard ( * ), for example, + // 2025-11-29T* , which matches an entire day. + // + // - state - The state of the report ( available | pending | error ). + Filters []types.Filter + + // The IDs of the images for filtering the reports. If specified, only reports + // containing these images are returned. + ImageIds []string + + // The maximum number of items to return for this request. 
To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + MaxResults *int32 + + // The token returned from a previous paginated request. Pagination continues from + // the end of the items returned by the previous request. + NextToken *string + + // The IDs of the image usage reports. + ReportIds []string + + noSmithyDocumentSerde +} + +type DescribeImageUsageReportsOutput struct { + + // The image usage reports. + ImageUsageReports []types.ImageUsageReport + + // The token to include in another request to get the next page of items. This + // value is null when there are no more items to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeImageUsageReportsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeImageUsageReports{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeImageUsageReports{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImageUsageReports"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImageUsageReports(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ImageUsageReportAvailableWaiterOptions are waiter options for +// ImageUsageReportAvailableWaiter +type ImageUsageReportAvailableWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // ImageUsageReportAvailableWaiter will use default minimum delay of 15 seconds. + // Note that MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ImageUsageReportAvailableWaiter will use default max delay of 120 + // seconds. Note that MaxDelay must resolve to value greater than or equal to the + // MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. 
+ Retryable func(context.Context, *DescribeImageUsageReportsInput, *DescribeImageUsageReportsOutput, error) (bool, error) +} + +// ImageUsageReportAvailableWaiter defines the waiters for +// ImageUsageReportAvailable +type ImageUsageReportAvailableWaiter struct { + client DescribeImageUsageReportsAPIClient + + options ImageUsageReportAvailableWaiterOptions +} + +// NewImageUsageReportAvailableWaiter constructs a ImageUsageReportAvailableWaiter. +func NewImageUsageReportAvailableWaiter(client DescribeImageUsageReportsAPIClient, optFns ...func(*ImageUsageReportAvailableWaiterOptions)) *ImageUsageReportAvailableWaiter { + options := ImageUsageReportAvailableWaiterOptions{} + options.MinDelay = 15 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = imageUsageReportAvailableStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &ImageUsageReportAvailableWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for ImageUsageReportAvailable waiter. The +// maxWaitDur is the maximum wait duration the waiter will wait. The maxWaitDur is +// required and must be greater than zero. +func (w *ImageUsageReportAvailableWaiter) Wait(ctx context.Context, params *DescribeImageUsageReportsInput, maxWaitDur time.Duration, optFns ...func(*ImageUsageReportAvailableWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for ImageUsageReportAvailable waiter +// and returns the output of the successful operation. The maxWaitDur is the +// maximum wait duration the waiter will wait. The maxWaitDur is required and must +// be greater than zero. +func (w *ImageUsageReportAvailableWaiter) WaitForOutput(ctx context.Context, params *DescribeImageUsageReportsInput, maxWaitDur time.Duration, optFns ...func(*ImageUsageReportAvailableWaiterOptions)) (*DescribeImageUsageReportsOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeImageUsageReports(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for ImageUsageReportAvailable waiter") +} + +func imageUsageReportAvailableStateRetryable(ctx context.Context, input *DescribeImageUsageReportsInput, output *DescribeImageUsageReportsOutput, err error) (bool, error) { + + if err == nil { + v1 := output.ImageUsageReports + var v2 []string + for _, v := range v1 { + v3 := v.State + if v3 != nil { + v2 = append(v2, *v3) + } + } + expectedValue := "available" + match := len(v2) > 0 + for _, v := range v2 { + if string(v) != expectedValue { + match = false + break + } + } + + if match { + return false, nil + } + } + + if err == nil { + v1 := output.ImageUsageReports + var v2 []string + for _, v := range v1 { + v3 := v.State + if v3 != nil { + v2 = append(v2, *v3) + } + } + expectedValue := "failed" + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break + } + } + + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + if err != nil { + return false, err + } + return true, nil +} + +// DescribeImageUsageReportsPaginatorOptions is the paginator options for +// DescribeImageUsageReports +type DescribeImageUsageReportsPaginatorOptions struct { + // The maximum number of items to return for this request. To get the next page of + // items, make another request with the token returned in the output. For more + // information, see [Pagination]. + // + // [Pagination]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Query-Requests.html#api-pagination + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// DescribeImageUsageReportsPaginator is a paginator for DescribeImageUsageReports +type DescribeImageUsageReportsPaginator struct { + options DescribeImageUsageReportsPaginatorOptions + client DescribeImageUsageReportsAPIClient + params *DescribeImageUsageReportsInput + nextToken *string + firstPage bool +} + +// NewDescribeImageUsageReportsPaginator returns a new +// DescribeImageUsageReportsPaginator +func NewDescribeImageUsageReportsPaginator(client DescribeImageUsageReportsAPIClient, params *DescribeImageUsageReportsInput, optFns ...func(*DescribeImageUsageReportsPaginatorOptions)) *DescribeImageUsageReportsPaginator { + if params == nil { + params = &DescribeImageUsageReportsInput{} + } + + options := DescribeImageUsageReportsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeImageUsageReportsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeImageUsageReportsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeImageUsageReports page. +func (p *DescribeImageUsageReportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeImageUsageReportsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.DescribeImageUsageReports(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// DescribeImageUsageReportsAPIClient is a client that implements the +// DescribeImageUsageReports operation. +type DescribeImageUsageReportsAPIClient interface { + DescribeImageUsageReports(context.Context, *DescribeImageUsageReportsInput, ...func(*Options)) (*DescribeImageUsageReportsOutput, error) +} + +var _ DescribeImageUsageReportsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opDescribeImageUsageReports(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeImageUsageReports", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go index 2ba734d75..6bc179d7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go @@ -33,7 +33,12 @@ import ( // results, with the imageAllowed field set to true for each image. In audit-mode , // the imageAllowed field is set to true for images that meet the account's // Allowed AMIs criteria, and false for images that don't meet the criteria. For -// more information, see EnableAllowedImagesSettings. 
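Editorial note (not part of the vendored patch): the hunk above adds the generated DescribeImageUsageReports operation together with its ImageUsageReportAvailable waiter and DescribeImageUsageReports paginator. The sketch below shows how a consumer of the updated ec2 package might drive those helpers end to end; the report ID, region/credential setup, and error handling are illustrative assumptions and are not taken from the patch.

// Illustrative usage sketch only; exercises the waiter and paginator added above.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	ctx := context.Background()

	// Load default credentials/region; assumed environment, not part of the patch.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	input := &ec2.DescribeImageUsageReportsInput{
		// Placeholder report ID for illustration only.
		ReportIds: []string{"example-report-id"},
	}

	// Block until the report reaches the "available" state, or fail after 5 minutes.
	waiter := ec2.NewImageUsageReportAvailableWaiter(client)
	if err := waiter.Wait(ctx, input, 5*time.Minute); err != nil {
		log.Fatal(err)
	}

	// Page through the results with the generated paginator.
	p := ec2.NewDescribeImageUsageReportsPaginator(client, input)
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, report := range page.ImageUsageReports {
			fmt.Println(report)
		}
	}
}

The waiter enforces maxWaitDur > 0 and backs off between attempts using the MinDelay/MaxDelay options shown in the hunk; the paginator stops once NextToken is empty (or when StopOnDuplicateToken detects a repeated token).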
+// more information, see [Allowed AMIs]. +// +// The Amazon EC2 API follows an eventual consistency model. This means that the +// result of an API command you run that creates or modifies resources might not be +// immediately available to all subsequent commands you run. For guidance on how to +// manage eventual consistency, see [Eventual consistency in the Amazon EC2 API]in the Amazon EC2 Developer Guide. // // We strongly recommend using only paginated requests. Unpaginated requests are // susceptible to throttling and timeouts. @@ -41,6 +46,9 @@ import ( // The order of the elements in the response, including those within nested // structures, might vary. Applications should not assume the elements appear in a // particular order. +// +// [Allowed AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-allowed-amis.html +// [Eventual consistency in the Amazon EC2 API]: https://docs.aws.amazon.com/ec2/latest/devguide/eventual-consistency.html func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { if params == nil { params = &DescribeImagesInput{} @@ -112,6 +120,9 @@ type DescribeImagesInput struct { // - ena-support - A Boolean that indicates whether enhanced networking with ENA // is enabled. // + // - free-tier-eligible - A Boolean that indicates whether this image can be used + // under the Amazon Web Services Free Tier ( true | false ). + // // - hypervisor - The hypervisor type ( ovm | xen ). // // - image-allowed - A Boolean that indicates whether the image meets the @@ -319,6 +330,36 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go index 39339ad12..05b2812ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDescribeImportImageTasksMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go index d29680d29..278462684 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go @@ -155,6 +155,36 @@ func (c *Client) addOperationDescribeImportSnapshotTasksMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go index 4c5bba14f..499c466e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go @@ -204,6 +204,36 @@ func (c *Client) addOperationDescribeInstanceAttributeMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, 
options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go index d3c83cfe8..9f144363d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go @@ -179,6 +179,36 @@ func (c *Client) addOperationDescribeInstanceConnectEndpointsMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go index efd0476b3..1e909b897 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go @@ -188,6 +188,36 @@ func (c *Client) addOperationDescribeInstanceCreditSpecificationsMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go index ff3f7f984..63938118e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go @@ -135,6 +135,36 @@ func (c *Client) addOperationDescribeInstanceEventNotificationAttributesMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go index d7286441c..bea47ad80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go @@ -194,6 +194,36 @@ func (c *Client) addOperationDescribeInstanceEventWindowsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go index 4a7901896..f16cf6c75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go @@ -222,6 +222,36 @@ func (c *Client) addOperationDescribeInstanceImageMetadataMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err 
= addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go index 407713a10..1182d15dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go @@ -75,6 +75,8 @@ type DescribeInstanceStatusInput struct { // // - availability-zone - The Availability Zone of the instance. // + // - availability-zone-id - The ID of the Availability Zone of the instance. + // // - event.code - The code for the scheduled event ( instance-reboot | // system-reboot | system-maintenance | instance-retirement | instance-stop ). // @@ -253,6 +255,36 @@ func (c *Client) addOperationDescribeInstanceStatusMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go index f986eb4b6..7bba84b7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go @@ -16,29 +16,21 @@ import ( // information to determine the relative proximity of your EC2 instances within the // Amazon Web Services network to support your tightly coupled workloads. // -// Limitations +// Instance topology is supported for specific instance types only. For more +// information, see [Prerequisites for Amazon EC2 instance topology]in the Amazon EC2 User Guide. 
// -// - Supported zones -// -// - Availability Zone -// -// - Local Zone -// -// - Supported instance types -// -// - hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | -// hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | -// hpc7g.16xlarge -// -// - p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge | -// p5en.48xlarge -// -// - trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge | trn2.48xlarge | -// trn2u.48xlarge +// The Amazon EC2 API follows an eventual consistency model due to the distributed +// nature of the system supporting it. As a result, when you call the +// DescribeInstanceTopology API command immediately after launching instances, the +// response might return a null value for capacityBlockId because the data might +// not have fully propagated across all subsystems. For more information, see [Eventual consistency in the Amazon EC2 API]in +// the Amazon EC2 Developer Guide. // // For more information, see [Amazon EC2 instance topology] in the Amazon EC2 User Guide. // +// [Prerequisites for Amazon EC2 instance topology]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-topology-prerequisites.html // [Amazon EC2 instance topology]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-topology.html +// [Eventual consistency in the Amazon EC2 API]: https://docs.aws.amazon.com/ec2/latest/devguide/eventual-consistency.html func (c *Client) DescribeInstanceTopology(ctx context.Context, params *DescribeInstanceTopologyInput, optFns ...func(*Options)) (*DescribeInstanceTopologyOutput, error) { if params == nil { params = &DescribeInstanceTopologyInput{} @@ -207,6 +199,36 @@ func (c *Client) addOperationDescribeInstanceTopologyMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go index 6e09eb761..eb957f867 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go @@ -176,6 +176,36 @@ func (c *Client) addOperationDescribeInstanceTypeOfferingsMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go index 3bc3baa08..282e40038 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go @@ -80,8 +80,8 @@ type DescribeInstanceTypesInput struct { // - ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) // is supported for EBS volumes ( required | supported | unsupported ). // - // - free-tier-eligible - Indicates whether the instance type is eligible to use - // in the free tier ( true | false ). + // - free-tier-eligible - A Boolean that indicates whether this instance type can + // be used under the Amazon Web Services Free Tier ( true | false ). // // - hibernation-supported - Indicates whether On-Demand hibernation is supported // ( true | false ). @@ -166,6 +166,9 @@ type DescribeInstanceTypesInput struct { // - processor-info.supported-features - The supported CPU features ( amd-sev-snp // ). // + // - reboot-migration-support - Indicates whether enabling reboot migration is + // supported ( supported | unsupported ). + // // - supported-boot-mode - The boot mode ( legacy-bios | uefi ). // // - supported-root-device-type - The root device type ( ebs | instance-store ). @@ -307,6 +310,36 @@ func (c *Client) addOperationDescribeInstanceTypesMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go index 95f59c7fc..6a559b921 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go @@ -83,6 +83,8 @@ type DescribeInstancesInput struct { // // - availability-zone - The Availability Zone of the instance. // + // - availability-zone-id - The ID of the Availability Zone of the instance. + // // - block-device-mapping.attach-time - The attach time for an EBS volume mapped // to the instance, for example, 2022-09-15T17:15:20.000Z . // @@ -599,6 +601,36 @@ func (c *Client) addOperationDescribeInstancesMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go index b8a246ec4..e93e27ee8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go @@ -183,6 +183,36 @@ func (c *Client) addOperationDescribeInternetGatewaysMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if 
err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go index 73ff267a1..030f597a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDescribeIpamByoasnMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go index a86e9b168..85feecfd2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go @@ -176,6 +176,36 @@ func (c *Client) addOperationDescribeIpamExternalResourceVerificationTokensMiddl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err 
= addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go index 471e20f9f..af2a28250 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDescribeIpamPoolsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go index a05766632..7777060fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDescribeIpamResourceDiscoveriesMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go index 0eb7134e1..7eba2db59 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDescribeIpamResourceDiscoveryAssociationsMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go index 2dfa57ea8..303b8084c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDescribeIpamScopesMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go index 2e071f051..31d321691 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go @@ -156,6 +156,36 @@ func (c *Client) addOperationDescribeIpamsMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err 
= addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go index efc198981..da38d3950 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go @@ -159,6 +159,36 @@ func (c *Client) addOperationDescribeIpv6PoolsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go index 13f13a7b2..6b14d6efa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go @@ -174,6 +174,36 @@ func (c *Client) addOperationDescribeKeyPairsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = 
addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go index 8ada63ca1..a8280b502 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go @@ -237,6 +237,36 @@ func (c *Client) addOperationDescribeLaunchTemplateVersionsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go index 9e972034c..3d0acc211 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go @@ -167,6 +167,36 @@ func (c *Client) addOperationDescribeLaunchTemplatesMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go index 2779c2a8e..3d1c3a9e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go @@ -170,6 +170,36 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTableVirtualInterfaceGroup if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go index 3bee2c82a..cc700f40b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go @@ -168,6 +168,36 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTableVpcAssociationsMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go index 08f7a1fa2..493b25f50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go @@ -166,6 +166,36 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTablesMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return 
err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go index 0c23a9035..a1a181be4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go @@ -161,6 +161,36 @@ func (c *Client) addOperationDescribeLocalGatewayVirtualInterfaceGroupsMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go index 324ccdeaf..a176f1125 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go @@ -169,6 +169,36 @@ func (c *Client) addOperationDescribeLocalGatewayVirtualInterfacesMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go index 149c9a316..0f3d3e2ad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go @@ -161,6 +161,36 @@ func (c *Client) addOperationDescribeLocalGatewaysMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go index 5ad0e5f0e..4a8f95f24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go @@ -158,6 +158,36 @@ func (c *Client) addOperationDescribeLockedSnapshotsMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go index d5a8111a0..bae645a3a 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDescribeMacHostsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacModificationTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacModificationTasks.go new file mode 100644 index 000000000..293c293db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacModificationTasks.go @@ -0,0 +1,323 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes a System Integrity Protection (SIP) modification task or volume +// ownership delegation task for an Amazon EC2 Mac instance. For more information, +// see [Configure SIP for Amazon EC2 instances]in the Amazon EC2 User Guide. +// +// [Configure SIP for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/mac-sip-settings.html#mac-sip-configure +func (c *Client) DescribeMacModificationTasks(ctx context.Context, params *DescribeMacModificationTasksInput, optFns ...func(*Options)) (*DescribeMacModificationTasksOutput, error) { + if params == nil { + params = &DescribeMacModificationTasksInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeMacModificationTasks", params, optFns, c.addOperationDescribeMacModificationTasksMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeMacModificationTasksOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeMacModificationTasksInput struct { + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // Specifies one or more filters for the request: + // + // - instance-id - The ID of the instance for which the task was created. + // + // - task-state - The state of the task ( successful | failed | in-progress | + // pending ). 
+ // + // - mac-system-integrity-protection-configuration.sip-status - The overall SIP + // state requested in the task ( enabled | disabled ). + // + // - start-time - The date and time the task was created. + // + // - task-type - The type of task ( sip-modification | + // volume-ownership-delegation ). + Filters []types.Filter + + // The ID of task. + MacModificationTaskIds []string + + // The maximum number of results to return for the request in a single page. The + // remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given a + // larger value than 500, you receive an error. + MaxResults *int32 + + // The token to use to retrieve the next page of results. + NextToken *string + + noSmithyDocumentSerde +} + +type DescribeMacModificationTasksOutput struct { + + // Information about the tasks. + MacModificationTasks []types.MacModificationTask + + // The token to use to retrieve the next page of results. This value is null when + // there are no more results to return. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeMacModificationTasksMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpDescribeMacModificationTasks{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpDescribeMacModificationTasks{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeMacModificationTasks"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeMacModificationTasks(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + 
return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// DescribeMacModificationTasksPaginatorOptions is the paginator options for +// DescribeMacModificationTasks +type DescribeMacModificationTasksPaginatorOptions struct { + // The maximum number of results to return for the request in a single page. The + // remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given a + // larger value than 500, you receive an error. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// DescribeMacModificationTasksPaginator is a paginator for +// DescribeMacModificationTasks +type DescribeMacModificationTasksPaginator struct { + options DescribeMacModificationTasksPaginatorOptions + client DescribeMacModificationTasksAPIClient + params *DescribeMacModificationTasksInput + nextToken *string + firstPage bool +} + +// NewDescribeMacModificationTasksPaginator returns a new +// DescribeMacModificationTasksPaginator +func NewDescribeMacModificationTasksPaginator(client DescribeMacModificationTasksAPIClient, params *DescribeMacModificationTasksInput, optFns ...func(*DescribeMacModificationTasksPaginatorOptions)) *DescribeMacModificationTasksPaginator { + if params == nil { + params = &DescribeMacModificationTasksInput{} + } + + options := DescribeMacModificationTasksPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeMacModificationTasksPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeMacModificationTasksPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeMacModificationTasks page. 
+func (p *DescribeMacModificationTasksPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeMacModificationTasksOutput, error) {
+	if !p.HasMorePages() {
+		return nil, fmt.Errorf("no more pages available")
+	}
+
+	params := *p.params
+	params.NextToken = p.nextToken
+
+	var limit *int32
+	if p.options.Limit > 0 {
+		limit = &p.options.Limit
+	}
+	params.MaxResults = limit
+
+	optFns = append([]func(*Options){
+		addIsPaginatorUserAgent,
+	}, optFns...)
+	result, err := p.client.DescribeMacModificationTasks(ctx, &params, optFns...)
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// DescribeMacModificationTasksAPIClient is a client that implements the
+// DescribeMacModificationTasks operation.
+type DescribeMacModificationTasksAPIClient interface {
+	DescribeMacModificationTasks(context.Context, *DescribeMacModificationTasksInput, ...func(*Options)) (*DescribeMacModificationTasksOutput, error)
+}
+
+var _ DescribeMacModificationTasksAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opDescribeMacModificationTasks(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region: region,
+		ServiceID: ServiceID,
+		OperationName: "DescribeMacModificationTasks",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go
index ae7d45ca5..a95aee04c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go
@@ -13,8 +13,6 @@ import (
 // Describes your managed prefix lists and any Amazon Web Services-managed prefix
 // lists.
-//
-// To view the entries for your prefix list, use GetManagedPrefixListEntries.
func (c *Client) DescribeManagedPrefixLists(ctx context.Context, params *DescribeManagedPrefixListsInput, optFns ...func(*Options)) (*DescribeManagedPrefixListsOutput, error) { if params == nil { params = &DescribeManagedPrefixListsInput{} @@ -160,6 +158,36 @@ func (c *Client) addOperationDescribeManagedPrefixListsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go index 32c78c828..f4ad198a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeMovingAddressesMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go index 1a7ff3e95..c26405fcf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go @@ -179,6 +179,36 @@ func (c *Client) addOperationDescribeNatGatewaysMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go index 20d11aa0c..3c631dc1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go @@ -208,6 +208,36 @@ func (c *Client) addOperationDescribeNetworkAclsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go index 65390b5ce..4b90d23a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeNetworkInsightsAccessScopeAnalysesMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go index f18f0f544..dfce08732 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDescribeNetworkInsightsAccessScopesMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go index a53cffe6c..37f5a5577 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go @@ -167,6 +167,36 @@ func (c *Client) addOperationDescribeNetworkInsightsAnalysesMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go index b0094826b..ef1ac3acb 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go @@ -179,6 +179,36 @@ func (c *Client) addOperationDescribeNetworkInsightsPathsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go index 2d4709a52..fa26bed97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeNetworkInterfaceAttributeMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go index c9d088e27..241a1e8a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go @@ -166,6 +166,36 @@ func (c *Client) addOperationDescribeNetworkInterfacePermissionsMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, 
options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go index 82b3e438d..72cb772bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go @@ -108,10 +108,10 @@ type DescribeNetworkInterfacesInput struct { // // - interface-type - The type of network interface ( api_gateway_managed | // aws_codestar_connections_managed | branch | ec2_instance_connect_endpoint | - // efa | efa-only | efs | gateway_load_balancer | gateway_load_balancer_endpoint - // | global_accelerator_managed | interface | iot_rules_managed | lambda | - // load_balancer | nat_gateway | network_load_balancer | quicksight | - // transit_gateway | trunk | vpc_endpoint ). + // efa | efa-only | efs | evs | gateway_load_balancer | + // gateway_load_balancer_endpoint | global_accelerator_managed | interface | + // iot_rules_managed | lambda | load_balancer | nat_gateway | + // network_load_balancer | quicksight | transit_gateway | trunk | vpc_endpoint ). // // - mac-address - The MAC address of the network interface. 
// @@ -279,6 +279,36 @@ func (c *Client) addOperationDescribeNetworkInterfacesMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeOutpostLags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeOutpostLags.go index c262ff29a..72aaead8b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeOutpostLags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeOutpostLags.go @@ -12,6 +12,8 @@ import ( ) // Describes the Outposts link aggregation groups (LAGs). +// +// LAGs are only available for second-generation Outposts racks at this time. func (c *Client) DescribeOutpostLags(ctx context.Context, params *DescribeOutpostLagsInput, optFns ...func(*Options)) (*DescribeOutpostLagsOutput, error) { if params == nil { params = &DescribeOutpostLagsInput{} @@ -178,6 +180,36 @@ func (c *Client) addOperationDescribeOutpostLagsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go index 36c492b1a..14c89bddd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go @@ -179,6 +179,36 @@ func (c *Client) addOperationDescribePlacementGroupsMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go index 0deb7e144..b9773a862 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go @@ -14,8 +14,6 @@ import ( // Describes available Amazon Web Services services in a prefix list format, which // includes the prefix list name and prefix list ID of the service and the IP // address range for the service. -// -// We recommend that you use DescribeManagedPrefixLists instead. func (c *Client) DescribePrefixLists(ctx context.Context, params *DescribePrefixListsInput, optFns ...func(*Options)) (*DescribePrefixListsOutput, error) { if params == nil { params = &DescribePrefixListsInput{} @@ -159,6 +157,36 @@ func (c *Client) addOperationDescribePrefixListsMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go index e4a5c9618..eeb9cf968 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go @@ -170,6 +170,36 @@ func (c *Client) addOperationDescribePrincipalIdFormatMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err 
= addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go index c230b91d2..3b26c8e01 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDescribePublicIpv4PoolsMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go index 3176e0cf3..e08ca020f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeRegionsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go index 0209a20ab..8ac181c60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go @@ -161,6 +161,36 @@ func (c *Client) addOperationDescribeReplaceRootVolumeTasksMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go index 7e120c1d6..18e8af963 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go @@ -206,6 +206,36 @@ func (c *Client) addOperationDescribeReservedInstancesMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go index f1b25d597..1fd01fab1 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go @@ -173,6 +173,36 @@ func (c *Client) addOperationDescribeReservedInstancesListingsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go index 5c6a12b50..f3f8a6b8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go @@ -187,6 +187,36 @@ func (c *Client) addOperationDescribeReservedInstancesModificationsMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go index 3e5ecd1b1..c6cdea679 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go @@ -260,6 +260,36 @@ func (c *Client) addOperationDescribeReservedInstancesOfferingsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerEndpoints.go index fc17a9038..a72330546 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerEndpoints.go @@ -159,6 +159,36 @@ func (c *Client) addOperationDescribeRouteServerEndpointsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerPeers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerPeers.go index a8e4c2499..e218bc5f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerPeers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServerPeers.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeRouteServerPeersMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServers.go index 707aa374c..57fa469b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteServers.go @@ -174,6 +174,36 @@ func (c *Client) addOperationDescribeRouteServersMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go index 03215958a..b8a9a6a61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go @@ -230,6 +230,36 @@ func (c *Client) addOperationDescribeRouteTablesMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go index 90c65d3a6..141515f5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go @@ -188,6 +188,36 @@ func (c *Client) addOperationDescribeScheduledInstanceAvailabilityMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go index 815204803..a49433a16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeScheduledInstancesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go index 2622e7886..6a79c7693 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDescribeSecurityGroupReferencesMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go index 07b729055..e3c44f8ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeSecurityGroupRulesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go index 6e1f59226..3f9ce4ccb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go @@ -8,7 +8,11 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + "strconv" + "time" ) // Describes security group VPC associations made with [AssociateSecurityGroupVpc]. @@ -41,6 +45,8 @@ type DescribeSecurityGroupVpcAssociationsInput struct { // // - group-id : The security group ID. // + // - group-owner-id : The group owner ID. + // // - vpc-id : The ID of the associated VPC. // // - vpc-owner-id : The account ID of the VPC owner. 
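The hunk above documents a new group-owner-id filter for DescribeSecurityGroupVpcAssociations. As a rough illustration only (not part of the patch), a caller could use it like the sketch below; the Filters field follows the usual EC2 Describe* input shape, and the account ID is a placeholder assumption:

// Sketch: list security-group/VPC associations owned by a given account using
// the group-owner-id filter documented in this patch. Client setup and the
// account ID are assumptions for illustration.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	out, err := client.DescribeSecurityGroupVpcAssociations(context.TODO(),
		&ec2.DescribeSecurityGroupVpcAssociationsInput{
			Filters: []types.Filter{
				{Name: aws.String("group-owner-id"), Values: []string{"123456789012"}},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, assoc := range out.SecurityGroupVpcAssociations {
		// State is one of the association lifecycle values used by the new waiters below.
		fmt.Println(assoc.State)
	}
}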
@@ -170,6 +176,36 @@ func (c *Client) addOperationDescribeSecurityGroupVpcAssociationsMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } @@ -185,6 +221,487 @@ func (c *Client) addOperationDescribeSecurityGroupVpcAssociationsMiddlewares(sta return nil } +// SecurityGroupVpcAssociationAssociatedWaiterOptions are waiter options for +// SecurityGroupVpcAssociationAssociatedWaiter +type SecurityGroupVpcAssociationAssociatedWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // SecurityGroupVpcAssociationAssociatedWaiter will use default minimum delay of 10 + // seconds. Note that MinDelay must resolve to a value lesser than or equal to the + // MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, SecurityGroupVpcAssociationAssociatedWaiter will use default max + // delay of 120 seconds. Note that MaxDelay must resolve to value greater than or + // equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. 
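The Retryable contract described above — return (true, nil) to keep polling, (false, nil) on success, and a non-nil error for a terminal failure — can be overridden through the waiter options added in this patch. A minimal sketch under assumptions: the package and the waitAssociated helper are hypothetical, a configured *ec2.Client is passed in, and the override simply mirrors the "associated" state check used by the generated default:

// Sketch: custom Retryable override for the new association waiter, following
// the contract documented above. Replaces the service-modeled decision entirely.
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func waitAssociated(ctx context.Context, client *ec2.Client, in *ec2.DescribeSecurityGroupVpcAssociationsInput) error {
	w := ec2.NewSecurityGroupVpcAssociationAssociatedWaiter(client,
		func(o *ec2.SecurityGroupVpcAssociationAssociatedWaiterOptions) {
			o.LogWaitAttempts = true
			o.Retryable = func(ctx context.Context,
				_ *ec2.DescribeSecurityGroupVpcAssociationsInput,
				out *ec2.DescribeSecurityGroupVpcAssociationsOutput,
				err error) (bool, error) {
				if err != nil {
					return false, err // terminal: surface API errors immediately
				}
				for _, a := range out.SecurityGroupVpcAssociations {
					if string(a.State) != "associated" {
						return true, nil // at least one association still pending: keep polling
					}
				}
				return false, nil // every association reached "associated"
			}
		})
	// Poll for up to five minutes; MinDelay/MaxDelay keep their 10s/120s defaults.
	return w.Wait(ctx, in, 5*time.Minute)
}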
+ Retryable func(context.Context, *DescribeSecurityGroupVpcAssociationsInput, *DescribeSecurityGroupVpcAssociationsOutput, error) (bool, error) +} + +// SecurityGroupVpcAssociationAssociatedWaiter defines the waiters for +// SecurityGroupVpcAssociationAssociated +type SecurityGroupVpcAssociationAssociatedWaiter struct { + client DescribeSecurityGroupVpcAssociationsAPIClient + + options SecurityGroupVpcAssociationAssociatedWaiterOptions +} + +// NewSecurityGroupVpcAssociationAssociatedWaiter constructs a +// SecurityGroupVpcAssociationAssociatedWaiter. +func NewSecurityGroupVpcAssociationAssociatedWaiter(client DescribeSecurityGroupVpcAssociationsAPIClient, optFns ...func(*SecurityGroupVpcAssociationAssociatedWaiterOptions)) *SecurityGroupVpcAssociationAssociatedWaiter { + options := SecurityGroupVpcAssociationAssociatedWaiterOptions{} + options.MinDelay = 10 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = securityGroupVpcAssociationAssociatedStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &SecurityGroupVpcAssociationAssociatedWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for SecurityGroupVpcAssociationAssociated +// waiter. The maxWaitDur is the maximum wait duration the waiter will wait. The +// maxWaitDur is required and must be greater than zero. +func (w *SecurityGroupVpcAssociationAssociatedWaiter) Wait(ctx context.Context, params *DescribeSecurityGroupVpcAssociationsInput, maxWaitDur time.Duration, optFns ...func(*SecurityGroupVpcAssociationAssociatedWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for +// SecurityGroupVpcAssociationAssociated waiter and returns the output of the +// successful operation. The maxWaitDur is the maximum wait duration the waiter +// will wait. The maxWaitDur is required and must be greater than zero. +func (w *SecurityGroupVpcAssociationAssociatedWaiter) WaitForOutput(ctx context.Context, params *DescribeSecurityGroupVpcAssociationsInput, maxWaitDur time.Duration, optFns ...func(*SecurityGroupVpcAssociationAssociatedWaiterOptions)) (*DescribeSecurityGroupVpcAssociationsOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeSecurityGroupVpcAssociations(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for SecurityGroupVpcAssociationAssociated waiter") +} + +func securityGroupVpcAssociationAssociatedStateRetryable(ctx context.Context, input *DescribeSecurityGroupVpcAssociationsInput, output *DescribeSecurityGroupVpcAssociationsOutput, err error) (bool, error) { + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "associated" + match := len(v2) > 0 + for _, v := range v2 { + if string(v) != expectedValue { + match = false + break + } + } + + if match { + return false, nil + } + } + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "associating" + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break + } + } + + if match { + return true, nil + } + } + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "association-failed" + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break + } + } + + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + if err != nil { + return false, err + } + return true, nil +} + +// SecurityGroupVpcAssociationDisassociatedWaiterOptions are waiter options for +// SecurityGroupVpcAssociationDisassociatedWaiter +type SecurityGroupVpcAssociationDisassociatedWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // SecurityGroupVpcAssociationDisassociatedWaiter will use default minimum delay of + // 10 seconds. Note that MinDelay must resolve to a value lesser than or equal to + // the MaxDelay. 
+ MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, SecurityGroupVpcAssociationDisassociatedWaiter will use default max + // delay of 120 seconds. Note that MaxDelay must resolve to value greater than or + // equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *DescribeSecurityGroupVpcAssociationsInput, *DescribeSecurityGroupVpcAssociationsOutput, error) (bool, error) +} + +// SecurityGroupVpcAssociationDisassociatedWaiter defines the waiters for +// SecurityGroupVpcAssociationDisassociated +type SecurityGroupVpcAssociationDisassociatedWaiter struct { + client DescribeSecurityGroupVpcAssociationsAPIClient + + options SecurityGroupVpcAssociationDisassociatedWaiterOptions +} + +// NewSecurityGroupVpcAssociationDisassociatedWaiter constructs a +// SecurityGroupVpcAssociationDisassociatedWaiter. +func NewSecurityGroupVpcAssociationDisassociatedWaiter(client DescribeSecurityGroupVpcAssociationsAPIClient, optFns ...func(*SecurityGroupVpcAssociationDisassociatedWaiterOptions)) *SecurityGroupVpcAssociationDisassociatedWaiter { + options := SecurityGroupVpcAssociationDisassociatedWaiterOptions{} + options.MinDelay = 10 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = securityGroupVpcAssociationDisassociatedStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &SecurityGroupVpcAssociationDisassociatedWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for SecurityGroupVpcAssociationDisassociated +// waiter. The maxWaitDur is the maximum wait duration the waiter will wait. The +// maxWaitDur is required and must be greater than zero. +func (w *SecurityGroupVpcAssociationDisassociatedWaiter) Wait(ctx context.Context, params *DescribeSecurityGroupVpcAssociationsInput, maxWaitDur time.Duration, optFns ...func(*SecurityGroupVpcAssociationDisassociatedWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for +// SecurityGroupVpcAssociationDisassociated waiter and returns the output of the +// successful operation. The maxWaitDur is the maximum wait duration the waiter +// will wait. The maxWaitDur is required and must be greater than zero. 
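WaitForOutput, documented above, returns the final DescribeSecurityGroupVpcAssociations result once the waiter reaches its success state. A hedged usage sketch for the disassociation waiter follows; the package, the waitDisassociated helper, and the filter value are placeholders, while the group-id filter itself is documented earlier in this file's input type:

// Sketch: wait for a security group to finish disassociating from its VPCs and
// inspect the final output. An empty association list also counts as success
// per the generated state checks below.
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func waitDisassociated(ctx context.Context, client *ec2.Client, groupID string) error {
	w := ec2.NewSecurityGroupVpcAssociationDisassociatedWaiter(client)
	in := &ec2.DescribeSecurityGroupVpcAssociationsInput{
		Filters: []types.Filter{
			{Name: aws.String("group-id"), Values: []string{groupID}},
		},
	}
	// Give up after ten minutes; the waiter computes its own backoff between calls.
	out, err := w.WaitForOutput(ctx, in, 10*time.Minute)
	if err != nil {
		return err
	}
	for _, a := range out.SecurityGroupVpcAssociations {
		fmt.Println(a.State) // expected: "disassociated" (or no entries at all)
	}
	return nil
}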
+func (w *SecurityGroupVpcAssociationDisassociatedWaiter) WaitForOutput(ctx context.Context, params *DescribeSecurityGroupVpcAssociationsInput, maxWaitDur time.Duration, optFns ...func(*SecurityGroupVpcAssociationDisassociatedWaiterOptions)) (*DescribeSecurityGroupVpcAssociationsOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.DescribeSecurityGroupVpcAssociations(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for SecurityGroupVpcAssociationDisassociated waiter") +} + +func securityGroupVpcAssociationDisassociatedStateRetryable(ctx context.Context, input *DescribeSecurityGroupVpcAssociationsInput, output *DescribeSecurityGroupVpcAssociationsOutput, err error) (bool, error) { + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "disassociated" + match := len(v2) > 0 + for _, v := range v2 { + if string(v) != expectedValue { + match = false + break + } + } + + if match { + return false, nil + } + } + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "disassociating" + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break + } + } + + if match { + return true, nil + } + } + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + var v2 []types.SecurityGroupVpcAssociationState + for _, v := range v1 { + v3 := v.State + v2 = append(v2, v3) + } + expectedValue := "disassociation-failed" + var match bool + for 
_, v := range v2 { + if string(v) == expectedValue { + match = true + break + } + } + + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + if err == nil { + v1 := output.SecurityGroupVpcAssociations + v2 := len(v1) + v3 := 0 + v4 := int64(v2) == int64(v3) + expectedValue := "true" + bv, err := strconv.ParseBool(expectedValue) + if err != nil { + return false, fmt.Errorf("error parsing boolean from string %w", err) + } + if v4 == bv { + return false, nil + } + } + + if err != nil { + return false, err + } + return true, nil +} + // DescribeSecurityGroupVpcAssociationsPaginatorOptions is the paginator options // for DescribeSecurityGroupVpcAssociations type DescribeSecurityGroupVpcAssociationsPaginatorOptions struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go index d2df992bc..92db9482c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go @@ -245,6 +245,36 @@ func (c *Client) addOperationDescribeSecurityGroupsMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeServiceLinkVirtualInterfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeServiceLinkVirtualInterfaces.go index 5c7bd7a46..909e9a6b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeServiceLinkVirtualInterfaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeServiceLinkVirtualInterfaces.go @@ -169,6 +169,36 @@ func (c *Client) addOperationDescribeServiceLinkVirtualInterfacesMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go index c602e5ee9..f5562aa80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go @@ -159,6 +159,36 @@ func (c *Client) addOperationDescribeSnapshotAttributeMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go index 01e185f0f..339aa9a12 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeSnapshotTierStatusMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go index 7616f534c..951f5f1eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go @@ -57,8 +57,6 @@ import ( // If you are describing a long list of snapshots, we recommend that you paginate // the output to make the list more manageable. For more information, see [Pagination]. // -// To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. -// // For more information about EBS snapshots, see [Amazon EBS snapshots] in the Amazon EBS User Guide. // // We strongly recommend using only paginated requests. Unpaginated requests are @@ -256,6 +254,36 @@ func (c *Client) addOperationDescribeSnapshotsMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go index 67132b123..4f8d575b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go @@ -139,6 +139,36 @@ func (c *Client) addOperationDescribeSpotDatafeedSubscriptionMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go index 35e558b2a..65441686a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeSpotFleetInstancesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go index 8a72bb8b2..4970d1ed0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go @@ -191,6 +191,36 @@ func (c *Client) addOperationDescribeSpotFleetRequestHistoryMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go index b97954c79..f74a199ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go @@ -157,6 +157,36 @@ func (c *Client) addOperationDescribeSpotFleetRequestsMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go index 01c017919..a6a152648 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go @@ -108,6 +108,9 @@ type DescribeSpotInstanceRequestsInput struct { // - launched-availability-zone - The Availability Zone in which the request is // launched. // + // - launched-availability-zone-id - The ID of the Availability Zone in which the + // request is launched. + // // - network-interface.addresses.primary - Indicates whether the IP address is // the primary private IP address. // @@ -283,6 +286,36 @@ func (c *Client) addOperationDescribeSpotInstanceRequestsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go index fe8400c64..2e96be46b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go @@ -39,8 +39,15 @@ func (c *Client) DescribeSpotPriceHistory(ctx context.Context, params *DescribeS type DescribeSpotPriceHistoryInput struct { // Filters the results by the specified Availability Zone. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both AvailabilityZone *string + // Filters the results by the specified ID of the Availability Zone. 
+ // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both + AvailabilityZoneId *string + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . Otherwise, it is @@ -56,6 +63,9 @@ type DescribeSpotPriceHistoryInput struct { // - availability-zone - The Availability Zone for which prices should be // returned. // + // - availability-zone-id - The ID of the Availability Zone for which prices + // should be returned. + // // - instance-type - The type of instance (for example, m3.medium ). // // - product-description - The product description for the Spot price ( Linux/UNIX @@ -196,6 +206,36 @@ func (c *Client) addOperationDescribeSpotPriceHistoryMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go index 20cd68344..681a49186 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeStaleSecurityGroupsMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go 
index 46a28ff7e..d9d22ed6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go @@ -26,12 +26,12 @@ import ( // past 31 days can be viewed. // // To use this API, you must have the required permissions. For more information, -// see [Permissions for storing and restoring AMIs using Amazon S3]in the Amazon EC2 User Guide. +// see [Permissions for storing and restoring AMIs using S3]in the Amazon EC2 User Guide. // -// For more information, see [Store and restore an AMI using Amazon S3] in the Amazon EC2 User Guide. +// For more information, see [Store and restore an AMI using S3] in the Amazon EC2 User Guide. // -// [Store and restore an AMI using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html -// [Permissions for storing and restoring AMIs using Amazon S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions +// [Store and restore an AMI using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html +// [Permissions for storing and restoring AMIs using S3]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/work-with-ami-store-restore.html#ami-s3-permissions func (c *Client) DescribeStoreImageTasks(ctx context.Context, params *DescribeStoreImageTasksInput, optFns ...func(*Options)) (*DescribeStoreImageTasksOutput, error) { if params == nil { params = &DescribeStoreImageTasksInput{} @@ -187,6 +187,36 @@ func (c *Client) addOperationDescribeStoreImageTasksMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go index 7772da2b4..0151a945b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go @@ -246,6 +246,36 @@ func (c *Client) addOperationDescribeSubnetsMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go index 59193f8c6..e61a35675 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go @@ -178,6 +178,36 @@ func (c *Client) addOperationDescribeTagsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go index 3baf1a572..4d4da2630 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go @@ -178,6 +178,36 @@ func (c *Client) addOperationDescribeTrafficMirrorFilterRulesMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go index ce1db7e72..28b4af60e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go @@ -155,6 +155,36 @@ func (c *Client) addOperationDescribeTrafficMirrorFiltersMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go index 5146c65fb..7a49be354 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go @@ -172,6 +172,36 @@ func (c *Client) addOperationDescribeTrafficMirrorSessionsMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go index a36cc59ff..da41013ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go @@ -163,6 +163,36 @@ func (c *Client) addOperationDescribeTrafficMirrorTargetsMiddlewares(stack *midd if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go index 6e4406be0..153d09e01 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go @@ -178,6 +178,36 @@ func (c *Client) addOperationDescribeTransitGatewayAttachmentsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go index e09b552c5..135b9d2eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go @@ -158,6 +158,36 @@ func (c *Client) addOperationDescribeTransitGatewayConnectPeersMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if 
err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go index 4af640cac..cf9e76d6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go @@ -164,6 +164,36 @@ func (c *Client) addOperationDescribeTransitGatewayConnectsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go index 60d911ac6..e48102913 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go @@ -159,6 +159,36 @@ func (c *Client) addOperationDescribeTransitGatewayMulticastDomainsMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } 
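// Editorial sketch (not part of the vendored patch): every hunk in this series is the same
// mechanical change -- the regenerated client wires the new interceptor hooks
// (addInterceptBeforeRetryLoop, addInterceptAttempt, ..., addInterceptAfterDeserialization)
// into each operation's smithy middleware stack, one guarded `if err = ...; err != nil` per hook.
// Those addIntercept* helpers are internal generated functions; the minimal, assumption-laden
// sketch below only illustrates that guarded-registration pattern using the public
// github.com/aws/smithy-go/middleware API, with a hypothetical no-op hook ("NoopHook")
// standing in for the SDK's real interceptor middleware.
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// addNoopHook mirrors the shape of the generated helpers: it registers one middleware
// on the stack and returns any registration error to the caller.
func addNoopHook(stack *middleware.Stack) error {
	return stack.Initialize.Add(
		middleware.InitializeMiddlewareFunc("NoopHook",
			func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
			) (middleware.InitializeOutput, middleware.Metadata, error) {
				// A real interceptor would observe or mutate the operation input here
				// before handing control to the rest of the stack.
				return next.HandleInitialize(ctx, in)
			}),
		middleware.After,
	)
}

func main() {
	// A stand-alone stack; in the SDK this is built per operation by the generated code.
	stack := middleware.NewStack("DescribeExample", func() interface{} { return nil })

	// Guarded registration -- the exact pattern repeated throughout this patch.
	if err := addNoopHook(stack); err != nil {
		fmt.Println("register hook:", err)
		return
	}
	fmt.Println("registered NoopHook on the Initialize step")
}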
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go index e15d5c6e4..a5bbfd133 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go @@ -172,6 +172,36 @@ func (c *Client) addOperationDescribeTransitGatewayPeeringAttachmentsMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go index 60fe0dcf7..4c2d6da42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDescribeTransitGatewayPolicyTablesMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go index ce275c6e7..97e1214e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDescribeTransitGatewayRouteTableAnnouncementsMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go index 93217a6e3..b7a2b3bd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeTransitGatewayRouteTablesMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go index 0a540fe13..628e11861 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go @@ -162,6 +162,36 @@ func (c *Client) addOperationDescribeTransitGatewayVpcAttachmentsMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go index 1e2990d94..2b48f4ecb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go @@ -192,6 +192,36 @@ func (c *Client) addOperationDescribeTransitGatewaysMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go index a97b3b82d..55cd0b5cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go @@ -155,6 +155,36 @@ func (c *Client) addOperationDescribeTrunkInterfaceAssociationsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go index cbe9f60a2..85950f58b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go @@ -157,6 +157,36 @@ func (c *Client) addOperationDescribeVerifiedAccessEndpointsMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go index e51f27b5b..9c2791606 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDescribeVerifiedAccessGroupsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go index bb555f04f..042651ac4 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDescribeVerifiedAccessInstanceLoggingConfigurations if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go index 7e1114ba3..c1713c1fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDescribeVerifiedAccessInstancesMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go index 52f195203..531237d11 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDescribeVerifiedAccessTrustProvidersMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + 
if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go index 5834d5dae..6b13cbe58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go @@ -158,6 +158,36 @@ func (c *Client) addOperationDescribeVolumeAttributeMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go index fa6238e61..fe131a377 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go @@ -39,8 +39,8 @@ import ( // Actions: Reflect the actions you might have to take in response to an event. // For example, if the status of the volume is impaired and the volume event shows // potential-data-inconsistency , then the action shows enable-volume-io . This -// means that you may want to enable the I/O operations for the volume by calling -// the EnableVolumeIOaction and then check the volume for data consistency. +// means that you may want to enable the I/O operations for the volume and then +// check the volume for data consistency. For more information, see [Work with an impaired EBS volume]. // // Volume status is based on the volume status checks, and does not reflect the // volume state. Therefore, volume status does not indicate volumes in the error @@ -51,6 +51,7 @@ import ( // particular order. 
// // [Monitor the status of your volumes]: https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-status.html +// [Work with an impaired EBS volume]: https://docs.aws.amazon.com/ebs/latest/userguide/work_volumes_impaired.html func (c *Client) DescribeVolumeStatus(ctx context.Context, params *DescribeVolumeStatusInput, optFns ...func(*Options)) (*DescribeVolumeStatusOutput, error) { if params == nil { params = &DescribeVolumeStatusInput{} @@ -226,6 +227,36 @@ func (c *Client) addOperationDescribeVolumeStatusMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go index 13b6f6246..6ec8f43e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go @@ -72,6 +72,9 @@ type DescribeVolumesInput struct { // // - availability-zone - The Availability Zone in which the volume was created. // + // - availability-zone-id - The ID of the Availability Zone in which the volume + // was created. + // // - create-time - The time stamp when the volume was created. 
// // - encrypted - Indicates whether the volume is encrypted ( true | false ) @@ -225,6 +228,36 @@ func (c *Client) addOperationDescribeVolumesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go index 69e2e7ad8..0fb751a72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go @@ -186,6 +186,36 @@ func (c *Client) addOperationDescribeVolumesModificationsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go index 8dfa29556..853fe499b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go @@ -161,6 +161,36 @@ func (c *Client) addOperationDescribeVpcAttributeMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != 
nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go index 5f7868172..079a024e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go @@ -183,6 +183,36 @@ func (c *Client) addOperationDescribeVpcBlockPublicAccessExclusionsMiddlewares(s if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go index 16267e889..3673d232c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go @@ -140,6 +140,36 @@ func (c *Client) addOperationDescribeVpcBlockPublicAccessOptionsMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go index 3e61ed0b0..117d3ad7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDescribeVpcClassicLinkMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go index f68bed780..d601d3793 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDescribeVpcClassicLinkDnsSupportMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go index e8b27f231..0b5626f8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go @@ -53,8 +53,6 @@ type DescribeVpcEndpointAssociationsInput struct { // // - resource-configuration-group-arn - The Amazon Resource Name (ARN) of the // resource configuration of type GROUP. - // - // - service-network-resource-association-id - The ID of the association. Filters []types.Filter // The maximum page size. @@ -168,6 +166,36 @@ func (c *Client) addOperationDescribeVpcEndpointAssociationsMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go index 01ab89185..42bb1f899 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go @@ -165,6 +165,36 @@ func (c *Client) addOperationDescribeVpcEndpointConnectionNotificationsMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go index cc75904c3..14b036975 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go @@ -166,6 +166,36 @@ func (c *Client) 
addOperationDescribeVpcEndpointConnectionsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go index 55f818ffe..635662dbb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go @@ -171,6 +171,36 @@ func (c *Client) addOperationDescribeVpcEndpointServiceConfigurationsMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go index 899d4b5f4..6afb77035 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go @@ -164,6 +164,36 @@ func (c *Client) addOperationDescribeVpcEndpointServicePermissionsMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil 
{ + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go index 5485d0018..e0767bb1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go @@ -187,6 +187,36 @@ func (c *Client) addOperationDescribeVpcEndpointServicesMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go index 304b7b895..8a98c7349 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go @@ -180,6 +180,36 @@ func (c *Client) addOperationDescribeVpcEndpointsMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); 
err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go index 29ae4ebd8..bd8f5f439 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go @@ -198,6 +198,36 @@ func (c *Client) addOperationDescribeVpcPeeringConnectionsMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go index 09b4bdcda..3c510d5df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go @@ -205,6 +205,36 @@ func (c *Client) addOperationDescribeVpcsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go index 79cd9cfd4..eaefc3c9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go @@ -189,6 +189,36 @@ func (c *Client) addOperationDescribeVpnConnectionsMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err 
= addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go index 956fc7bf5..144071ad7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go @@ -176,6 +176,36 @@ func (c *Client) addOperationDescribeVpnGatewaysMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go index ac0911eaf..b24157038 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDetachClassicLinkVpcMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go index f7768be5f..692f62baa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDetachInternetGatewayMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go index 02b589a58..7f3532552 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go @@ -155,6 +155,36 @@ func (c *Client) addOperationDetachNetworkInterfaceMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go index 76e0562bc..c1eaf857c 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go @@ -160,6 +160,36 @@ func (c *Client) addOperationDetachVerifiedAccessTrustProviderMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go index 343ea9539..8cd22af1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go @@ -23,10 +23,9 @@ import ( // When a volume with an Amazon Web Services Marketplace product code is detached // from an instance, the product code is no longer associated with the instance. // -// You can't detach or force detach volumes that are attached to Amazon ECS or -// Fargate tasks. Attempting to do this results in the -// UnsupportedOperationException exception with the Unable to detach volume -// attached to ECS tasks error message. +// You can't detach or force detach volumes that are attached to Amazon Web +// Services-managed resources. Attempting to do this results in the +// UnsupportedOperationException exception. // // For more information, see [Detach an Amazon EBS volume] in the Amazon EBS User Guide. // @@ -81,7 +80,8 @@ type DetachVolumeInput struct { // Describes volume attachment details. type DetachVolumeOutput struct { - // The ARN of the Amazon ECS or Fargate task to which the volume is attached. + // The ARN of the Amazon Web Services-managed resource to which the volume is + // attached. AssociatedResource *string // The time stamp when the attachment initiated. @@ -92,18 +92,21 @@ type DetachVolumeOutput struct { // The device name. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . Device *string // The ID of the instance. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . InstanceId *string - // The service principal of Amazon Web Services service that owns the underlying - // instance to which the volume is attached. + // The service principal of the Amazon Web Services service that owns the + // underlying resource to which the volume is attached. 
// - // This parameter is returned only for volumes that are attached to Fargate tasks. + // This parameter is returned only for volumes that are attached to Amazon Web + // Services-managed resources. InstanceOwningService *string // The attachment state of the volume. @@ -206,6 +209,36 @@ func (c *Client) addOperationDetachVolumeMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go index 71bfe7cc7..cf3f6f993 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDetachVpnGatewayMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go index e6bb42eea..d144ef0e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go @@ -145,6 +145,36 @@ func (c *Client) addOperationDisableAddressTransferMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + 
} + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go index 48c950aac..6f2311514 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go @@ -146,6 +146,36 @@ func (c *Client) addOperationDisableAllowedImagesSettingsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go index 97e67a75e..5b8f912da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDisableAwsNetworkPerformanceMetricSubscriptionMiddl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go index ae1ee7b02..648ac87e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go @@ -143,6 +143,36 @@ func (c *Client) addOperationDisableEbsEncryptionByDefaultMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go index d693db3cd..e0f0c1deb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go @@ -182,6 +182,36 @@ func (c *Client) addOperationDisableFastLaunchMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go index 284c8da6a..f95e6f7b5 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go @@ -153,6 +153,36 @@ func (c *Client) addOperationDisableFastSnapshotRestoresMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go index e109e4aaf..b343666ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go @@ -158,6 +158,36 @@ func (c *Client) addOperationDisableImageMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go index d4dc61be3..ffc2e2903 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go @@ -22,7 +22,7 @@ import ( // // For more information, see [Block public access to your AMIs] in the Amazon EC2 User Guide. 
// -// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis +// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-public-access-to-amis.html // [GetImageBlockPublicAccessState]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html func (c *Client) DisableImageBlockPublicAccess(ctx context.Context, params *DisableImageBlockPublicAccessInput, optFns ...func(*Options)) (*DisableImageBlockPublicAccessOutput, error) { if params == nil { @@ -146,6 +146,36 @@ func (c *Client) addOperationDisableImageBlockPublicAccessMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go index 234dfde61..bf7e73927 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go @@ -12,9 +12,9 @@ import ( // Cancels the deprecation of the specified AMI. // -// For more information, see [Deprecate an AMI] in the Amazon EC2 User Guide. +// For more information, see [Deprecate an Amazon EC2 AMI] in the Amazon EC2 User Guide. 
// -// [Deprecate an AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html +// [Deprecate an Amazon EC2 AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deprecate.html func (c *Client) DisableImageDeprecation(ctx context.Context, params *DisableImageDeprecationInput, optFns ...func(*Options)) (*DisableImageDeprecationOutput, error) { if params == nil { params = &DisableImageDeprecationInput{} @@ -145,6 +145,36 @@ func (c *Client) addOperationDisableImageDeprecationMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go index 95068d3ca..d63c30fd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go @@ -17,9 +17,9 @@ import ( // deregistration protection for the AMI, then, when you disable deregistration // protection, you won’t immediately be able to deregister the AMI. // -// For more information, see [Protect an AMI from deregistration] in the Amazon EC2 User Guide. +// For more information, see [Protect an Amazon EC2 AMI from deregistration] in the Amazon EC2 User Guide. 
// -// [Protect an AMI from deregistration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection +// [Protect an Amazon EC2 AMI from deregistration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deregistration-protection.html func (c *Client) DisableImageDeregistrationProtection(ctx context.Context, params *DisableImageDeregistrationProtectionInput, optFns ...func(*Options)) (*DisableImageDeregistrationProtectionOutput, error) { if params == nil { params = &DisableImageDeregistrationProtectionInput{} @@ -150,6 +150,36 @@ func (c *Client) addOperationDisableImageDeregistrationProtectionMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go index ecb450eb3..f029af450 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDisableIpamOrganizationAdminAccountMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableRouteServerPropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableRouteServerPropagation.go index 8e4b3a081..48208f9d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableRouteServerPropagation.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableRouteServerPropagation.go @@ -175,6 +175,36 @@ func (c *Client) addOperationDisableRouteServerPropagationMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go index 18a80126e..1fe92317f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go @@ -139,6 +139,36 @@ func (c *Client) addOperationDisableSerialConsoleAccessMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go index 6a7134c62..fc2de127b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDisableSnapshotBlockPublicAccessMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go index b09984fe6..20c33e598 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go @@ -149,6 +149,36 @@ func (c *Client) addOperationDisableTransitGatewayRouteTablePropagationMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go index 227fe0f1d..3853f7a26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDisableVgwRoutePropagationMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go index 73f51e901..580c2ea72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDisableVpcClassicLinkMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go index f5f2c0fbd..55517a841 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go @@ -136,6 +136,36 @@ func (c *Client) addOperationDisableVpcClassicLinkDnsSupportMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go index 3dfc1aab7..703fc5ed0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go @@ -15,6 
+15,15 @@ import ( // // This is an idempotent operation. If you perform the operation more than once, // Amazon EC2 doesn't return an error. +// +// An address cannot be disassociated if the all of the following conditions are +// met: +// +// - Network interface has a publicDualStackDnsName publicDnsName +// +// - Public IPv4 address is the primary public IPv4 address +// +// - Network interface only has one remaining public IPv4 address func (c *Client) DisassociateAddress(ctx context.Context, params *DisassociateAddressInput, optFns ...func(*Options)) (*DisassociateAddressOutput, error) { if params == nil { params = &DisassociateAddressInput{} @@ -139,6 +148,36 @@ func (c *Client) addOperationDisassociateAddressMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go index 88d764f31..751aef640 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go @@ -150,6 +150,36 @@ func (c *Client) addOperationDisassociateCapacityReservationBillingOwnerMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go index f3bb15c73..9e0b6a8a7 
100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go @@ -159,6 +159,36 @@ func (c *Client) addOperationDisassociateClientVpnTargetNetworkMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go index b833ea019..22c126342 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDisassociateEnclaveCertificateIamRoleMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go index 5c17867eb..b80375d89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go @@ -138,6 +138,36 @@ func (c *Client) addOperationDisassociateIamInstanceProfileMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go index e4736d4de..20bfdf6ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go @@ -151,6 +151,36 @@ func (c *Client) addOperationDisassociateInstanceEventWindowMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go index 99237e515..0814ab801 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDisassociateIpamByoasnMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil 
{ + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go index 7dd604072..6a151f825 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDisassociateIpamResourceDiscoveryMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go index d084720b9..8895c7b42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go @@ -168,6 +168,36 @@ func (c *Client) addOperationDisassociateNatGatewayAddressMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteServer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteServer.go index 5cfbad09e..e20ec7c01 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteServer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteServer.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDisassociateRouteServerMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go index 3961e9ec6..0d8c74f31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go @@ -144,6 +144,36 @@ func (c *Client) addOperationDisassociateRouteTableMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go index 1f8b2088e..0cf23a802 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go @@ -154,6 +154,36 @@ func (c *Client) addOperationDisassociateSecurityGroupVpcMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go index 61006bdc8..0925c6f5a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go @@ -141,6 +141,36 @@ func (c *Client) addOperationDisassociateSubnetCidrBlockMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go index ec2ef2cc6..c468e3fdc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go @@ -152,6 +152,36 @@ func (c *Client) addOperationDisassociateTransitGatewayMulticastDomainMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go index 9f6293829..d5d133bd3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDisassociateTransitGatewayPolicyTableMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go index 328e067d1..af97466f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go @@ -147,6 +147,36 @@ func (c *Client) addOperationDisassociateTransitGatewayRouteTableMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go index 1ad27ed39..23b2b9276 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go @@ -157,6 +157,36 @@ func (c *Client) addOperationDisassociateTrunkInterfaceMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go index 7373a7a2b..c86f8c944 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go @@ -148,6 +148,36 @@ func (c *Client) addOperationDisassociateVpcCidrBlockMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go index 2639464f5..fed889cbc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go @@ -150,6 +150,36 @@ func (c *Client) addOperationEnableAddressTransferMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go index ef61e08b6..c534a4d93 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go @@ -170,6 +170,36 @@ func (c *Client) addOperationEnableAllowedImagesSettingsMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go index 8e5ba6700..ecb50c596 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go @@ -150,6 +150,36 @@ func (c *Client) addOperationEnableAwsNetworkPerformanceMetricSubscriptionMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err 
!= nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go index e9ada8f0d..31be9a7cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go @@ -17,8 +17,6 @@ import ( // specified when you created each volume. For more information, see [Amazon EBS encryption]in the Amazon // EBS User Guide. // -// You can specify the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId. -// // Enabling encryption by default has no effect on the encryption status of your // existing volumes. // @@ -149,6 +147,36 @@ func (c *Client) addOperationEnableEbsEncryptionByDefaultMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go index c0d164578..928bea0ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go @@ -200,6 +200,36 @@ func (c *Client) addOperationEnableFastLaunchMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if 
err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go index 57c77bbe1..4816521f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go @@ -15,8 +15,7 @@ import ( // Availability Zones. // // You get the full benefit of fast snapshot restores after they enter the enabled -// state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. To disable -// fast snapshot restores, use DisableFastSnapshotRestores. +// state. // // For more information, see [Amazon EBS fast snapshot restore] in the Amazon EBS User Guide. // @@ -163,6 +162,36 @@ func (c *Client) addOperationEnableFastSnapshotRestoresMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go index 1ce5221d2..39fa0288b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go @@ -18,9 +18,9 @@ import ( // // Only the AMI owner can re-enable a disabled AMI. // -// For more information, see [Disable an AMI] in the Amazon EC2 User Guide. +// For more information, see [Disable an Amazon EC2 AMI] in the Amazon EC2 User Guide. 
// -// [Disable an AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/disable-an-ami.html +// [Disable an Amazon EC2 AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/disable-an-ami.html func (c *Client) EnableImage(ctx context.Context, params *EnableImageInput, optFns ...func(*Options)) (*EnableImageOutput, error) { if params == nil { params = &EnableImageInput{} @@ -151,6 +151,36 @@ func (c *Client) addOperationEnableImageMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go index 7209bc43b..8617259e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go @@ -21,7 +21,7 @@ import ( // // For more information, see [Block public access to your AMIs] in the Amazon EC2 User Guide. 
// -// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis +// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-public-access-to-amis.html // [GetImageBlockPublicAccessState]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html func (c *Client) EnableImageBlockPublicAccess(ctx context.Context, params *EnableImageBlockPublicAccessInput, optFns ...func(*Options)) (*EnableImageBlockPublicAccessOutput, error) { if params == nil { @@ -156,6 +156,36 @@ func (c *Client) addOperationEnableImageBlockPublicAccessMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go index 33a3d746c..799a6a40f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go @@ -157,6 +157,36 @@ func (c *Client) addOperationEnableImageDeprecationMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go index 8e75e272b..6840bc4d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go @@ -14,11 +14,11 @@ import ( // enabled, the AMI can't be deregistered. // // To allow the AMI to be deregistered, you must first disable deregistration -// protection using DisableImageDeregistrationProtection. +// protection. // -// For more information, see [Protect an AMI from deregistration] in the Amazon EC2 User Guide. +// For more information, see [Protect an Amazon EC2 AMI from deregistration] in the Amazon EC2 User Guide. // -// [Protect an AMI from deregistration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/deregister-ami.html#ami-deregistration-protection +// [Protect an Amazon EC2 AMI from deregistration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-deregistration-protection.html func (c *Client) EnableImageDeregistrationProtection(ctx context.Context, params *EnableImageDeregistrationProtectionInput, optFns ...func(*Options)) (*EnableImageDeregistrationProtectionOutput, error) { if params == nil { params = &EnableImageDeregistrationProtectionInput{} @@ -153,6 +153,36 @@ func (c *Client) addOperationEnableImageDeregistrationProtectionMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go index 1d2cd5a2b..a300a7063 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go @@ -145,6 +145,36 @@ func (c *Client) addOperationEnableIpamOrganizationAdminAccountMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go index a4ae0990f..35bc1ae3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go @@ -139,6 +139,36 @@ func (c *Client) addOperationEnableReachabilityAnalyzerOrganizationSharingMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableRouteServerPropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableRouteServerPropagation.go index 88cb0e989..b97dfae9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableRouteServerPropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableRouteServerPropagation.go @@ -155,6 +155,36 @@ func (c *Client) addOperationEnableRouteServerPropagationMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go index 942421c62..4976dc519 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go @@ -139,6 +139,36 @@ func (c *Client) addOperationEnableSerialConsoleAccessMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go index 2333c0264..0b2c71195 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go @@ -174,6 +174,36 @@ func (c *Client) addOperationEnableSnapshotBlockPublicAccessMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go index aa3159320..d0fd2249c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go @@ -149,6 +149,36 @@ func (c *Client) addOperationEnableTransitGatewayRouteTablePropagationMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go index 05d0131a3..6dda21f0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go @@ -147,6 +147,36 @@ func (c *Client) addOperationEnableVgwRoutePropagationMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go index 10c65f795..cfaf86007 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go @@ -138,6 +138,36 @@ func (c *Client) addOperationEnableVolumeIOMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go index 1bc2c50b4..29d6b6f69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go @@ -148,6 +148,36 @@ func (c *Client) addOperationEnableVpcClassicLinkMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go index 9f7f0856b..ff25bed72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go @@ -138,6 +138,36 @@ func (c *Client) addOperationEnableVpcClassicLinkDnsSupportMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go index c1ee512d6..4bec9ce45 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go @@ -146,6 +146,36 @@ func (c *Client) addOperationExportClientVpnClientCertificateRevocationListMiddl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go index 2a4b2c6c6..9304a4136 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go @@ -144,6 +144,36 @@ func (c *Client) addOperationExportClientVpnClientConfigurationMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go index ffd9f2e32..0336183f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go @@ -202,6 +202,36 @@ func (c *Client) addOperationExportImageMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = 
addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go index 4761de005..df98500bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go @@ -182,6 +182,36 @@ func (c *Client) addOperationExportTransitGatewayRoutesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go index 4aafbe3c6..593e5508c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go @@ -157,6 +157,36 @@ func (c *Client) addOperationExportVerifiedAccessInstanceClientConfigurationMidd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != 
nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetActiveVpnTunnelStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetActiveVpnTunnelStatus.go new file mode 100644 index 000000000..bd2c3a48d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetActiveVpnTunnelStatus.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the currently negotiated security parameters for an active VPN tunnel, +// including IKE version, DH groups, encryption algorithms, and integrity +// algorithms. +func (c *Client) GetActiveVpnTunnelStatus(ctx context.Context, params *GetActiveVpnTunnelStatusInput, optFns ...func(*Options)) (*GetActiveVpnTunnelStatusOutput, error) { + if params == nil { + params = &GetActiveVpnTunnelStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetActiveVpnTunnelStatus", params, optFns, c.addOperationGetActiveVpnTunnelStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetActiveVpnTunnelStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetActiveVpnTunnelStatusInput struct { + + // The ID of the VPN connection for which to retrieve the active tunnel status. + // + // This member is required. + VpnConnectionId *string + + // The external IP address of the VPN tunnel for which to retrieve the active + // status. + // + // This member is required. + VpnTunnelOutsideIpAddress *string + + // Checks whether you have the required permissions for the action, without + // actually making the request. + DryRun *bool + + noSmithyDocumentSerde +} + +type GetActiveVpnTunnelStatusOutput struct { + + // Information about the current security configuration of the VPN tunnel. + ActiveVpnTunnelStatus *types.ActiveVpnTunnelStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetActiveVpnTunnelStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpGetActiveVpnTunnelStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpGetActiveVpnTunnelStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetActiveVpnTunnelStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetActiveVpnTunnelStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetActiveVpnTunnelStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); 
err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetActiveVpnTunnelStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetActiveVpnTunnelStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go index 75a40caf2..00a8fb5b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go @@ -167,6 +167,36 @@ func (c *Client) addOperationGetAllowedImagesSettingsMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go index 6fbad3128..eb12e3b44 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go @@ -147,6 +147,36 @@ func (c *Client) addOperationGetAssociatedEnclaveCertificateIamRolesMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil 
{ + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go index d43e72399..36cb138d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go @@ -154,6 +154,36 @@ func (c *Client) addOperationGetAssociatedIpv6PoolCidrsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go index 9274dc314..c888f01eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go @@ -157,6 +157,36 @@ func (c *Client) addOperationGetAwsNetworkPerformanceDataMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go index 69b2f8605..d0d5610bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go @@ -216,6 +216,36 @@ func (c *Client) 
addOperationGetCapacityReservationUsageMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go index 0d8d7a4c6..4b273d4c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go @@ -172,6 +172,36 @@ func (c *Client) addOperationGetCoipPoolUsageMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go index 7f185b9bc..8e1ba5fe3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go @@ -161,6 +161,36 @@ func (c *Client) addOperationGetConsoleOutputMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + 
return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go index e113f64d6..157abf26c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go @@ -155,6 +155,36 @@ func (c *Client) addOperationGetConsoleScreenshotMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go index d4ce0b169..4c7a34137 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go @@ -186,6 +186,36 @@ func (c *Client) addOperationGetDeclarativePoliciesReportSummaryMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go index 10cc79343..dd229502d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go @@ -147,6 +147,36 @@ func (c *Client) addOperationGetDefaultCreditSpecificationMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go index d5b131b6b..8dd6d5fe7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go @@ -11,8 +11,7 @@ import ( ) // Describes the default KMS key for EBS encryption by default for your account in -// this Region. You can change the default KMS key for encryption by default using ModifyEbsDefaultKmsKeyId -// or ResetEbsDefaultKmsKeyId. +// this Region. // // For more information, see [Amazon EBS encryption] in the Amazon EBS User Guide. // @@ -139,6 +138,36 @@ func (c *Client) addOperationGetEbsDefaultKmsKeyIdMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go index 2a0a14552..08832f6be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go @@ -142,6 +142,36 @@ func (c *Client) addOperationGetEbsEncryptionByDefaultMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go index bed8eadd9..c81481e67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go @@ -169,6 +169,36 @@ func (c *Client) addOperationGetFlowLogsIntegrationTemplateMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go index d7075fd85..abc73b691 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go @@ -159,6 +159,36 @@ func (c *Client) addOperationGetGroupsForCapacityReservationMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go index f4bc61b4b..bbcd5e3f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go @@ -157,6 +157,36 @@ func (c *Client) addOperationGetHostReservationPurchasePreviewMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go index ed6bb30c3..55e4a486c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go @@ -16,7 +16,7 @@ import ( // // For more information, see [Block public access to your AMIs] in the Amazon EC2 User Guide. // -// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis +// [Block public access to your AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-public-access-to-amis.html func (c *Client) GetImageBlockPublicAccessState(ctx context.Context, params *GetImageBlockPublicAccessStateInput, optFns ...func(*Options)) (*GetImageBlockPublicAccessStateOutput, error) { if params == nil { params = &GetImageBlockPublicAccessStateInput{} @@ -156,6 +156,36 @@ func (c *Client) addOperationGetImageBlockPublicAccessStateMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go index 44b922e06..ada2675f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go @@ -139,6 +139,36 @@ func (c *Client) addOperationGetInstanceMetadataDefaultsMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, 
options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go index ee5c2cb6f..d237dc0da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go @@ -163,6 +163,36 @@ func (c *Client) addOperationGetInstanceTpmEkPubMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go index f43a3095b..7be1617f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go @@ -58,6 +58,9 @@ type GetInstanceTypesFromInstanceRequirementsInput struct { // This member is required. VirtualizationTypes []types.VirtualizationType + // Reserved. + Context *string + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . 
Otherwise, it is @@ -181,6 +184,36 @@ func (c *Client) addOperationGetInstanceTypesFromInstanceRequirementsMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go index 81726ac3b..ed425a72f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go @@ -158,6 +158,36 @@ func (c *Client) addOperationGetInstanceUefiDataMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go index 84b547046..ea38583b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go @@ -177,6 +177,36 @@ func (c *Client) addOperationGetIpamAddressHistoryMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go index 0ca614048..5dc99d00e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go @@ -165,6 +165,36 @@ func (c *Client) addOperationGetIpamDiscoveredAccountsMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go index d9195575b..3e28b7d41 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go @@ -165,6 +165,36 @@ func (c *Client) addOperationGetIpamDiscoveredPublicAddressesMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go index 5fe6bc08f..bf2c6b65d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go @@ -165,6 +165,36 @@ func (c *Client) addOperationGetIpamDiscoveredResourceCidrsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go index ef1d1f0ca..ffc8b4fa8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go @@ -169,6 +169,36 @@ func (c *Client) addOperationGetIpamPoolAllocationsMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go index b0d8904f8..29f3b93ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go @@ -157,6 +157,36 @@ func (c *Client) addOperationGetIpamPoolCidrsMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go index 6fcebbdd7..aa62e6486 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go @@ -176,6 +176,36 @@ func (c *Client) addOperationGetIpamResourceCidrsMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go index 3cf2728f8..dfd37181a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go @@ -150,6 +150,36 @@ func (c *Client) addOperationGetLaunchTemplateDataMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go index 2c72a29d8..cbd862595 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go @@ -154,6 +154,36 @@ func (c *Client) addOperationGetManagedPrefixListAssociationsMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go index b9e52a382..23d6c6670 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go @@ -157,6 +157,36 @@ func (c *Client) addOperationGetManagedPrefixListEntriesMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go index 28a222611..e2d1b718a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go @@ -159,6 +159,36 @@ func (c *Client) addOperationGetNetworkInsightsAccessScopeAnalysisFindingsMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go index c191bd80c..3d897cab3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go @@ -142,6 +142,36 @@ func (c *Client) addOperationGetNetworkInsightsAccessScopeContentMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = 
addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go index 9941ceda8..2b1d48a78 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go @@ -171,6 +171,36 @@ func (c *Client) addOperationGetPasswordDataMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go index f259cc0db..4bb152c64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go @@ -176,6 +176,36 @@ func (c *Client) addOperationGetReservedInstancesExchangeQuoteMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerAssociations.go index e83eaee6b..5af536792 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerAssociations.go @@ -149,6 +149,36 @@ func (c *Client) addOperationGetRouteServerAssociationsMiddlewares(stack *middle if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerPropagations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerPropagations.go index 365d1b36d..b47df4cef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerPropagations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerPropagations.go @@ -170,6 +170,36 @@ func (c *Client) addOperationGetRouteServerPropagationsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerRoutingDatabase.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerRoutingDatabase.go index 9505b4e92..54a63b717 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerRoutingDatabase.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetRouteServerRoutingDatabase.go @@ -185,6 +185,36 @@ func (c *Client) addOperationGetRouteServerRoutingDatabaseMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go index 75d065c35..95e0effe7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go @@ -172,6 +172,36 @@ func (c *Client) addOperationGetSecurityGroupsForVpcMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go index 763d97618..eccba4255 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go @@ -148,6 +148,36 @@ func (c *Client) addOperationGetSerialConsoleAccessStatusMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go index e30b96096..49fb61eb6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go @@ -159,6 +159,36 @@ func (c *Client) addOperationGetSnapshotBlockPublicAccessStateMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go index 053b67154..60d7fb040 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go @@ -214,6 +214,36 @@ func (c *Client) addOperationGetSpotPlacementScoresMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go index 3d9822ec7..86aa48fc6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go @@ -171,6 +171,36 @@ func (c *Client) addOperationGetSubnetCidrReservationsMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + 
return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go index 96f7ca3cd..941ae4b98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go @@ -159,6 +159,36 @@ func (c *Client) addOperationGetTransitGatewayAttachmentPropagationsMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go index 7ad4876cb..aa4d9bd9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go @@ -168,6 +168,36 @@ func (c *Client) addOperationGetTransitGatewayMulticastDomainAssociationsMiddlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err 
+ } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go index 17f7b8790..eb11906ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go @@ -155,6 +155,36 @@ func (c *Client) addOperationGetTransitGatewayPolicyTableAssociationsMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go index ae1130e8c..5d4362682 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go @@ -152,6 +152,36 @@ func (c *Client) addOperationGetTransitGatewayPolicyTableEntriesMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go index cbd83aacf..3b49c8bf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go @@ -174,6 +174,36 @@ func (c *Client) addOperationGetTransitGatewayPrefixListReferencesMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go index d3fea7c8c..f7771952b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go @@ -164,6 +164,36 @@ func (c *Client) addOperationGetTransitGatewayRouteTableAssociationsMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go index f8cf786a3..d40678d69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go @@ -164,6 +164,36 
@@ func (c *Client) addOperationGetTransitGatewayRouteTablePropagationsMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go index fb46192f8..a7c475903 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go @@ -144,6 +144,36 @@ func (c *Client) addOperationGetVerifiedAccessEndpointPolicyMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go index eabdea908..16f4fe84f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go @@ -153,6 +153,36 @@ func (c *Client) addOperationGetVerifiedAccessEndpointTargetsMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go index eb2bca1d5..3b1235e98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go @@ -144,6 +144,36 @@ func (c *Client) addOperationGetVerifiedAccessGroupPolicyMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go index f715998b1..3d12c0968 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go @@ -50,6 +50,11 @@ type GetVpnConnectionDeviceSampleConfigurationInput struct { // gateway device. You can specify one of the following versions: ikev1 or ikev2 . InternetKeyExchangeVersion *string + // The type of sample configuration to generate. Valid values are "compatibility" + // (includes IKEv1) or "recommended" (throws UnsupportedOperationException for + // IKEv1). 
+ SampleType *string + noSmithyDocumentSerde } @@ -152,6 +157,36 @@ func (c *Client) addOperationGetVpnConnectionDeviceSampleConfigurationMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go index 8212f0c21..9d97dd9bb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go @@ -162,6 +162,36 @@ func (c *Client) addOperationGetVpnConnectionDeviceTypesMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go index a55e3d9b4..1f4cb5321 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go @@ -162,6 +162,36 @@ func (c *Client) addOperationGetVpnTunnelReplacementStatusMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != 
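The GetVpnConnectionDeviceSampleConfiguration hunk above adds an optional SampleType field next to the existing InternetKeyExchangeVersion. A hedged usage sketch follows: the default-config loading, region/credentials, and the two identifier field names and values are assumptions for illustration; only InternetKeyExchangeVersion and SampleType come from the hunk itself.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // assumes credentials/region are configured
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	out, err := client.GetVpnConnectionDeviceSampleConfiguration(ctx, &ec2.GetVpnConnectionDeviceSampleConfigurationInput{
		// Assumed required identifiers with placeholder values.
		VpnConnectionId:            aws.String("vpn-0123456789abcdef0"),
		VpnConnectionDeviceTypeId:  aws.String("example-device-type-id"),
		InternetKeyExchangeVersion: aws.String("ikev2"),
		// New optional field from this hunk; "compatibility" is the other documented value.
		SampleType: aws.String("recommended"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}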
nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go index f5ff8033d..4051d98d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go @@ -155,6 +155,36 @@ func (c *Client) addOperationImportClientVpnClientCertificateRevocationListMiddl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go index 92bb8b38a..9838058f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go @@ -301,6 +301,36 @@ func (c *Client) addOperationImportImageMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err 
+ } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go index 2f95e9140..9ae504b3b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go @@ -165,6 +165,36 @@ func (c *Client) addOperationImportInstanceMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go index aa86e1013..bf00be37b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go @@ -172,6 +172,36 @@ func (c *Client) addOperationImportKeyPairMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go index f2219bf88..c663e4fa9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go @@ -204,6 +204,36 @@ func (c *Client) addOperationImportSnapshotMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); 
err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go index a079c4f95..112e29789 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go @@ -38,11 +38,6 @@ func (c *Client) ImportVolume(ctx context.Context, params *ImportVolumeInput, op type ImportVolumeInput struct { - // The Availability Zone for the resulting EBS volume. - // - // This member is required. - AvailabilityZone *string - // The disk image. // // This member is required. @@ -53,6 +48,16 @@ type ImportVolumeInput struct { // This member is required. Volume *types.VolumeDetail + // The Availability Zone for the resulting EBS volume. + // + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. + AvailabilityZone *string + + // The ID of the Availability Zone for the resulting EBS volume. + // + // Either AvailabilityZone or AvailabilityZoneId must be specified, but not both. + AvailabilityZoneId *string + // A description of the volume. 
Description *string @@ -164,6 +169,36 @@ func (c *Client) addOperationImportVolumeMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go index 274d87e22..e46d13f9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go @@ -156,6 +156,36 @@ func (c *Client) addOperationListImagesInRecycleBinMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go index 34fc82851..a93d841b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go @@ -153,6 +153,36 @@ func (c *Client) addOperationListSnapshotsInRecycleBinMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
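The ImportVolume hunk just above turns AvailabilityZone from a required member into one of two mutually exclusive options and adds AvailabilityZoneId; exactly one of the two should be set. A small sketch of building the input with the zone-ID variant follows; the zone ID is a placeholder, and the disk image and volume members (both required) plus the actual API call are omitted.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	// Only the zone selection from this hunk is shown; the required disk image
	// and volume members are left out of this sketch.
	input := &ec2.ImportVolumeInput{
		AvailabilityZoneId: aws.String("use1-az1"), // placeholder zone ID
		// AvailabilityZone: aws.String("us-east-1a"), // alternative; do not set both
	}
	fmt.Printf("%+v\n", input)
}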
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go index 682fe3e19..d381ba074 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go @@ -260,6 +260,36 @@ func (c *Client) addOperationLockSnapshotMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go index 84f8863e1..aca76ff21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go @@ -148,6 +148,36 @@ func (c *Client) addOperationModifyAddressAttributeMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go index afb33c524..e87b4e904 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go @@ -150,6 +150,36 @@ func (c *Client) addOperationModifyAvailabilityZoneGroupMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go index b0087674b..05860a7e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go @@ -210,6 +210,36 @@ func (c *Client) addOperationModifyCapacityReservationMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go index 76bc192a8..787e5542b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go @@ -176,6 +176,36 @@ func (c *Client) addOperationModifyCapacityReservationFleetMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, 
options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go index df4363418..c6a737786 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go @@ -224,6 +224,36 @@ func (c *Client) addOperationModifyClientVpnEndpointMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go index a422898f6..06fa79bfa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go @@ -164,6 +164,36 @@ func (c *Client) addOperationModifyDefaultCreditSpecificationMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + 
if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go index 62ef2035d..6d574270c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go @@ -16,9 +16,7 @@ import ( // Amazon Web Services creates a unique Amazon Web Services managed KMS key in // each Region for use with encryption by default. If you change the default KMS // key to a symmetric customer managed KMS key, it is used instead of the Amazon -// Web Services managed KMS key. To reset the default KMS key to the Amazon Web -// Services managed KMS key for EBS, use ResetEbsDefaultKmsKeyId. Amazon EBS does not support asymmetric -// KMS keys. +// Web Services managed KMS key. Amazon EBS does not support asymmetric KMS keys. // // If you delete or disable the customer managed KMS key that you specified for // use with encryption by default, your instances will fail to launch. @@ -176,6 +174,36 @@ func (c *Client) addOperationModifyEbsDefaultKmsKeyIdMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go index 04e733958..1714ce374 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go @@ -187,6 +187,36 @@ func (c *Client) addOperationModifyFleetMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if 
err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go index b7b7d01d0..d62145741 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go @@ -169,6 +169,36 @@ func (c *Client) addOperationModifyFpgaImageAttributeMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go index db9e7e60a..09d11a825 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go @@ -182,6 +182,36 @@ func (c *Client) addOperationModifyHostsMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go index 671b64a91..87fce7698 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go @@ -170,6 +170,36 @@ func (c *Client) addOperationModifyIdFormatMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go index 62072ad52..ace234715 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go @@ -176,6 +176,36 @@ func (c *Client) addOperationModifyIdentityIdFormatMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go index 3f92bf30c..b6dcf188f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go @@ -200,6 +200,36 @@ func (c *Client) addOperationModifyImageAttributeMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + 
return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go index 7d09b6ec8..852fd98fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go @@ -63,7 +63,7 @@ type ModifyInstanceAttributeInput struct { // attached. The volume must be owned by the caller. If no value is specified for // DeleteOnTermination , the default is true and the volume is deleted when the // instance is terminated. You can't modify the DeleteOnTermination attribute for - // volumes that are attached to Fargate tasks. + // volumes that are attached to Amazon Web Services-managed resources. // // To add instance store volumes to an Amazon EBS-backed instance, you must add // them when you launch the instance. For more information, see [Update the block device mapping when launching an instance]in the Amazon EC2 @@ -257,6 +257,36 @@ func (c *Client) addOperationModifyInstanceAttributeMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go index 58ab3247e..2ca76e027 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go @@ -150,6 +150,36 @@ func (c *Client) addOperationModifyInstanceCapacityReservationAttributesMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err 
} + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceConnectEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceConnectEndpoint.go new file mode 100644 index 000000000..fba94e745 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceConnectEndpoint.go @@ -0,0 +1,228 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modifies the specified EC2 Instance Connect Endpoint. +// +// For more information, see [Modify an EC2 Instance Connect Endpoint] in the Amazon EC2 User Guide. +// +// [Modify an EC2 Instance Connect Endpoint]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/modify-ec2-instance-connect-endpoint.html +func (c *Client) ModifyInstanceConnectEndpoint(ctx context.Context, params *ModifyInstanceConnectEndpointInput, optFns ...func(*Options)) (*ModifyInstanceConnectEndpointOutput, error) { + if params == nil { + params = &ModifyInstanceConnectEndpointInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ModifyInstanceConnectEndpoint", params, optFns, c.addOperationModifyInstanceConnectEndpointMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ModifyInstanceConnectEndpointOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ModifyInstanceConnectEndpointInput struct { + + // The ID of the EC2 Instance Connect Endpoint to modify. + // + // This member is required. + InstanceConnectEndpointId *string + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + // The new IP address type for the EC2 Instance Connect Endpoint. + // + // PreserveClientIp is only supported on IPv4 EC2 Instance Connect Endpoints. To + // use PreserveClientIp , the value for IpAddressType must be ipv4 . + IpAddressType types.IpAddressType + + // Indicates whether the client IP address is preserved as the source. The + // following are the possible values. + // + // - true - Use the client IP address as the source. + // + // - false - Use the network interface IP address as the source. 
+ // + // PreserveClientIp=true is only supported on IPv4 EC2 Instance Connect Endpoints. + // If modifying PreserveClientIp to true , either the endpoint's existing + // IpAddressType must be ipv4 , or if modifying IpAddressType in the same request, + // the new value must be ipv4 . + // + // Default: false + PreserveClientIp *bool + + // Changes the security groups for the EC2 Instance Connect Endpoint. The new set + // of groups you specify replaces the current set. You must specify at least one + // group, even if it's just the default security group in the VPC. You must specify + // the ID of the security group, not the name. + SecurityGroupIds []string + + noSmithyDocumentSerde +} + +type ModifyInstanceConnectEndpointOutput struct { + + // The return value of the request. Returns true if the specified product code is + // owned by the requester and associated with the specified instance. + Return *bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationModifyInstanceConnectEndpointMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpModifyInstanceConnectEndpoint{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpModifyInstanceConnectEndpoint{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ModifyInstanceConnectEndpoint"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpModifyInstanceConnectEndpointValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opModifyInstanceConnectEndpoint(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { 
+ return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opModifyInstanceConnectEndpoint(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ModifyInstanceConnectEndpoint", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go index b5344013c..c66caa611 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go @@ -171,6 +171,36 @@ func (c *Client) addOperationModifyInstanceCpuOptionsMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go index 37d2ef4be..27bddae46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go @@ -158,6 +158,36 @@ 
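For context, here is a minimal caller-side sketch of the new ModifyInstanceConnectEndpoint operation added above. It is only an illustration of the generated API surface, not part of the vendored patch: it assumes a configured AWS environment, and the endpoint and security group IDs are placeholders. Note that PreserveClientIp=true requires an IPv4 endpoint, which is why IpAddressType is set to "ipv4".

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default configuration sources.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := ec2.NewFromConfig(cfg)

	// Placeholder IDs; PreserveClientIp=true is only supported on IPv4 endpoints.
	out, err := client.ModifyInstanceConnectEndpoint(ctx, &ec2.ModifyInstanceConnectEndpointInput{
		InstanceConnectEndpointId: aws.String("eice-0123456789abcdef0"),
		IpAddressType:             types.IpAddressType("ipv4"),
		PreserveClientIp:          aws.Bool(true),
		SecurityGroupIds:          []string{"sg-0123456789abcdef0"},
	})
	if err != nil {
		log.Fatalf("ModifyInstanceConnectEndpoint: %v", err)
	}
	log.Printf("return: %v", aws.ToBool(out.Return))
}
```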
func (c *Client) addOperationModifyInstanceCreditSpecificationMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go index 448cd409b..6a5d0503f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go @@ -153,6 +153,36 @@ func (c *Client) addOperationModifyInstanceEventStartTimeMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go index 7bf3386ba..5d9a1d894 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go @@ -184,6 +184,36 @@ func (c *Client) addOperationModifyInstanceEventWindowMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); 
err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go index 907c0cd1d..7556eb6d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go @@ -16,7 +16,11 @@ import ( // not enable simplified automatic recovery for an unsupported instance type. For // more information, see [Simplified automatic recovery]. // +// Modifies the reboot migration behavior during a user-initiated reboot of an +// instance that has a pending system-reboot event. For more information, see [Enable or disable reboot migration]. +// // [Simplified automatic recovery]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-recover.html#instance-configuration-recovery +// [Enable or disable reboot migration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/schedevents_actions_reboot.html#reboot-migration func (c *Client) ModifyInstanceMaintenanceOptions(ctx context.Context, params *ModifyInstanceMaintenanceOptionsInput, optFns ...func(*Options)) (*ModifyInstanceMaintenanceOptionsOutput, error) { if params == nil { params = &ModifyInstanceMaintenanceOptionsInput{} @@ -48,6 +52,22 @@ type ModifyInstanceMaintenanceOptionsInput struct { // UnauthorizedOperation . DryRun *bool + // Specifies whether to attempt reboot migration during a user-initiated reboot of + // an instance that has a scheduled system-reboot event: + // + // - default - Amazon EC2 attempts to migrate the instance to new hardware + // (reboot migration). If successful, the system-reboot event is cleared. If + // unsuccessful, an in-place reboot occurs and the event remains scheduled. + // + // - disabled - Amazon EC2 keeps the instance on the same hardware (in-place + // reboot). The system-reboot event remains scheduled. + // + // This setting only applies to supported instances that have a scheduled reboot + // event. For more information, see [Enable or disable reboot migration]in the Amazon EC2 User Guide. + // + // [Enable or disable reboot migration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/schedevents_actions_reboot.html#reboot-migration + RebootMigration types.InstanceRebootMigrationState + noSmithyDocumentSerde } @@ -60,6 +80,22 @@ type ModifyInstanceMaintenanceOptionsOutput struct { // The ID of the instance. InstanceId *string + // Specifies whether to attempt reboot migration during a user-initiated reboot of + // an instance that has a scheduled system-reboot event: + // + // - default - Amazon EC2 attempts to migrate the instance to new hardware + // (reboot migration). If successful, the system-reboot event is cleared. If + // unsuccessful, an in-place reboot occurs and the event remains scheduled. + // + // - disabled - Amazon EC2 keeps the instance on the same hardware (in-place + // reboot). 
The system-reboot event remains scheduled. + // + // This setting only applies to supported instances that have a scheduled reboot + // event. For more information, see [Enable or disable reboot migration]in the Amazon EC2 User Guide. + // + // [Enable or disable reboot migration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/schedevents_actions_reboot.html#reboot-migration + RebootMigration types.InstanceRebootMigrationState + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -154,6 +190,36 @@ func (c *Client) addOperationModifyInstanceMaintenanceOptionsMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go index a1b3e5b75..93d00d794 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go @@ -168,6 +168,36 @@ func (c *Client) addOperationModifyInstanceMetadataDefaultsMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go index 476a09559..41e7bf188 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go @@ 
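A short illustrative sketch of the new RebootMigration option on ModifyInstanceMaintenanceOptions documented above, again not part of the patch itself. It assumes a constructed *ec2.Client and that the input type's existing InstanceId field is populated with a real instance ID; the "disabled" value is one of the two documented states ("default" | "disabled").

```go
package ec2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// disableRebootMigration keeps the instance on the same hardware during a
// user-initiated reboot while a system-reboot event is scheduled.
func disableRebootMigration(ctx context.Context, client *ec2.Client, instanceID string) error {
	out, err := client.ModifyInstanceMaintenanceOptions(ctx, &ec2.ModifyInstanceMaintenanceOptionsInput{
		InstanceId:      aws.String(instanceID), // assumed real instance ID supplied by the caller
		RebootMigration: types.InstanceRebootMigrationState("disabled"),
	})
	if err != nil {
		return fmt.Errorf("modify maintenance options: %w", err)
	}
	fmt.Printf("instance %s reboot migration is now %s\n", aws.ToString(out.InstanceId), out.RebootMigration)
	return nil
}
```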
-206,6 +206,36 @@ func (c *Client) addOperationModifyInstanceMetadataOptionsMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go index 91ae753ce..397e5e552 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go @@ -162,6 +162,36 @@ func (c *Client) addOperationModifyInstanceNetworkPerformanceOptionsMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go index 54db51f36..da338d591 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go @@ -195,6 +195,36 @@ func (c *Client) addOperationModifyInstancePlacementMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if 
err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go index 1fabbe16d..b97a4c2f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go @@ -185,6 +185,36 @@ func (c *Client) addOperationModifyIpamMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go index 438d0e61a..4d5b540f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go @@ -191,6 +191,36 @@ func (c *Client) addOperationModifyIpamPoolMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go index 87d45f4fb..692a7a342 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go @@ -176,6 +176,36 @@ func (c *Client) addOperationModifyIpamResourceCidrMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go index d124fb84d..b7e30492b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go @@ -185,6 +185,36 @@ func (c *Client) addOperationModifyIpamResourceDiscoveryMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go index c47c242d7..12822fb8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go @@ -145,6 +145,36 @@ func (c *Client) addOperationModifyIpamScopeMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = 
addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go index aec93e538..81e4bad3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go @@ -165,6 +165,36 @@ func (c *Client) addOperationModifyLaunchTemplateMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go index f4c0ea688..9d52434ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go @@ -157,6 +157,36 @@ func (c *Client) addOperationModifyLocalGatewayRouteMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go index e732a83ae..3698b2b51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go @@ -168,6 +168,36 @@ func (c *Client) addOperationModifyManagedPrefixListMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go index c5567e0dd..b29c8f488 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go @@ -42,6 +42,9 @@ type ModifyNetworkInterfaceAttributeInput struct { // primary network interface (eth0). AssociatePublicIpAddress *bool + // A list of subnet IDs to associate with the network interface. + AssociatedSubnetIds []string + // Information about the interface attachment. If modifying the delete on // termination attribute, you must specify the ID of the interface attachment. 
Attachment *types.NetworkInterfaceAttachmentChanges @@ -189,6 +192,36 @@ func (c *Client) addOperationModifyNetworkInterfaceAttributeMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go index db6750b86..a9f3352ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go @@ -156,6 +156,36 @@ func (c *Client) addOperationModifyPrivateDnsNameOptionsMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPublicIpDnsNameOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPublicIpDnsNameOptions.go new file mode 100644 index 000000000..905d05da7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPublicIpDnsNameOptions.go @@ -0,0 +1,222 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ec2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modify public hostname options for a network interface. For more information, +// see [EC2 instance hostnames, DNS names, and domains]in the Amazon EC2 User Guide. 
+// +// [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html +func (c *Client) ModifyPublicIpDnsNameOptions(ctx context.Context, params *ModifyPublicIpDnsNameOptionsInput, optFns ...func(*Options)) (*ModifyPublicIpDnsNameOptionsOutput, error) { + if params == nil { + params = &ModifyPublicIpDnsNameOptionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ModifyPublicIpDnsNameOptions", params, optFns, c.addOperationModifyPublicIpDnsNameOptionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ModifyPublicIpDnsNameOptionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ModifyPublicIpDnsNameOptionsInput struct { + + // The public hostname type. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User + // Guide. + // + // - public-dual-stack-dns-name : A dual-stack public hostname for a network + // interface. Requests from within the VPC resolve to both the private IPv4 address + // and the IPv6 Global Unicast Address of the network interface. Requests from the + // internet resolve to both the public IPv4 and the IPv6 GUA address of the network + // interface. + // + // - public-ipv4-dns-name : An IPv4-enabled public hostname for a network + // interface. Requests from within the VPC resolve to the private primary IPv4 + // address of the network interface. Requests from the internet resolve to the + // public IPv4 address of the network interface. + // + // - public-ipv6-dns-name : An IPv6-enabled public hostname for a network + // interface. Requests from within the VPC or from the internet resolve to the IPv6 + // GUA of the network interface. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html + // + // This member is required. + HostnameType types.PublicIpDnsOption + + // A network interface ID. + // + // This member is required. + NetworkInterfaceId *string + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have the + // required permissions, the error response is DryRunOperation . Otherwise, it is + // UnauthorizedOperation . + DryRun *bool + + noSmithyDocumentSerde +} + +type ModifyPublicIpDnsNameOptionsOutput struct { + + // Whether or not the request was successful. + Successful *bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationModifyPublicIpDnsNameOptionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsEc2query_serializeOpModifyPublicIpDnsNameOptions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsEc2query_deserializeOpModifyPublicIpDnsNameOptions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ModifyPublicIpDnsNameOptions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpModifyPublicIpDnsNameOptionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opModifyPublicIpDnsNameOptions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterDeserialization(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opModifyPublicIpDnsNameOptions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ModifyPublicIpDnsNameOptions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go index baa6b3a3c..f069b013d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go @@ -156,6 +156,36 @@ func (c *Client) addOperationModifyReservedInstancesMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyRouteServer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyRouteServer.go index 43566c5cd..14a24b864 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyRouteServer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyRouteServer.go @@ -197,6 +197,36 @@ func (c *Client) addOperationModifyRouteServerMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = 
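Similarly, a minimal sketch of the new ModifyPublicIpDnsNameOptions operation added above, provided only as illustration. The network interface ID is a placeholder, and the hostname type value used here is one of the three documented options ("public-dual-stack-dns-name", "public-ipv4-dns-name", "public-ipv6-dns-name").

```go
package ec2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// setPublicIPv4DNSName switches a network interface's public hostname to the
// IPv4-only form ("public-ipv4-dns-name").
func setPublicIPv4DNSName(ctx context.Context, client *ec2.Client, eniID string) error {
	out, err := client.ModifyPublicIpDnsNameOptions(ctx, &ec2.ModifyPublicIpDnsNameOptionsInput{
		NetworkInterfaceId: aws.String(eniID), // placeholder ENI ID supplied by the caller
		HostnameType:       types.PublicIpDnsOption("public-ipv4-dns-name"),
	})
	if err != nil {
		return fmt.Errorf("modify public IP DNS name options: %w", err)
	}
	fmt.Printf("request successful: %v\n", aws.ToBool(out.Successful))
	return nil
}
```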
addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go index 6fdcf6bf4..ce2f57251 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go @@ -147,6 +147,36 @@ func (c *Client) addOperationModifySecurityGroupRulesMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go index 7859686eb..4d9e2ac29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go @@ -168,6 +168,36 @@ func (c *Client) addOperationModifySnapshotAttributeMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go index f7a5638ab..28b71de45 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go @@ -155,6 +155,36 @@ func (c *Client) addOperationModifySnapshotTierMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, 
options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go index ade6b27e1..5c92311eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go @@ -189,6 +189,36 @@ func (c *Client) addOperationModifySpotFleetRequestMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go index 084623a55..e9d738197 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go @@ -217,6 +217,36 @@ func (c *Client) addOperationModifySubnetAttributeMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go index 0b0aff75a..fadb9b751 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go @@ -155,6 +155,36 @@ func (c *Client) addOperationModifyTrafficMirrorFilterNetworkServicesMiddlewares if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go index d53823b59..15a7e1fbb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go @@ -182,6 +182,36 @@ func (c *Client) addOperationModifyTrafficMirrorFilterRuleMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go index 42dc53e19..c909af97f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go @@ -179,6 +179,36 @@ func (c *Client) addOperationModifyTrafficMirrorSessionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go index 60e23b5cf..819068f17 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go @@ -150,6 +150,36 @@ func (c *Client) addOperationModifyTransitGatewayMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go index 9e4ea672b..1d4c470c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go @@ -154,6 +154,36 @@ func (c *Client) addOperationModifyTransitGatewayPrefixListReferenceMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go index 47cb7f654..311d05ded 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go @@ -152,6 +152,36 @@ func (c *Client) addOperationModifyTransitGatewayVpcAttachmentMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go index 5e55777e4..feaa85d89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go @@ -171,6 +171,36 @@ func (c *Client) 
addOperationModifyVerifiedAccessEndpointMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go index 28d75f54d..10fff7548 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go @@ -166,6 +166,36 @@ func (c *Client) addOperationModifyVerifiedAccessEndpointPolicyMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go index 36eb30823..4433d0e7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go @@ -157,6 +157,36 @@ func (c *Client) addOperationModifyVerifiedAccessGroupMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, 
options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go index 2b9352893..6befadcd2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go @@ -166,6 +166,36 @@ func (c *Client) addOperationModifyVerifiedAccessGroupPolicyMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go index 492d54e4e..e49d8c8b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go @@ -158,6 +158,36 @@ func (c *Client) addOperationModifyVerifiedAccessInstanceMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return 
err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go index ca23d8f2e..c3e90bd4a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go @@ -157,6 +157,36 @@ func (c *Client) addOperationModifyVerifiedAccessInstanceLoggingConfigurationMid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go index 5e8be30d6..0e28ab0a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go @@ -168,6 +168,36 @@ func (c *Client) addOperationModifyVerifiedAccessTrustProviderMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go index ce905f1fa..1ca6996a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go @@ -227,6 +227,36 @@ func (c *Client) addOperationModifyVolumeMiddlewares(stack 
*middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go index c15a08d12..382bb9e32 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go @@ -151,6 +151,36 @@ func (c *Client) addOperationModifyVolumeAttributeMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go index d75e7d116..c3969c60e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go @@ -153,6 +153,36 @@ func (c *Client) addOperationModifyVpcAttributeMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + 
if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go index b70c3f466..8eb53ff61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go @@ -159,6 +159,36 @@ func (c *Client) addOperationModifyVpcBlockPublicAccessExclusionMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go index 0bfd6b6f6..80ff16f53 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go @@ -160,6 +160,36 @@ func (c *Client) addOperationModifyVpcBlockPublicAccessOptionsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go index 19f783008..36e0c62fc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go @@ -190,6 +190,36 @@ func (c *Client) addOperationModifyVpcEndpointMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go index 34ad32959..092f60e2c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go @@ -150,6 +150,36 @@ func (c *Client) addOperationModifyVpcEndpointConnectionNotificationMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go index f47c1d0db..e9ba00075 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go @@ -184,6 +184,36 @@ func (c *Client) addOperationModifyVpcEndpointServiceConfigurationMiddlewares(st if 
err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go index 0623bbe71..41700ee67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go @@ -149,6 +149,36 @@ func (c *Client) addOperationModifyVpcEndpointServicePayerResponsibilityMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go index 356aecfe9..31371ff7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go @@ -162,6 +162,36 @@ func (c *Client) addOperationModifyVpcEndpointServicePermissionsMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go index e90c06c55..49290a1cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go @@ -164,6 +164,36 @@ func (c *Client) addOperationModifyVpcPeeringConnectionOptionsMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go index 9ff055442..6e8d8d3a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go @@ -157,6 +157,36 @@ func (c *Client) addOperationModifyVpcTenancyMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return 
err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go index eb47726b8..fd70d2673 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go @@ -193,6 +193,36 @@ func (c *Client) addOperationModifyVpnConnectionMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go index 492a64585..221373912 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go @@ -167,6 +167,36 @@ func (c *Client) addOperationModifyVpnConnectionOptionsMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go index 41fd1b700..09ff8986f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go @@ -147,6 +147,36 @@ func (c *Client) addOperationModifyVpnTunnelCertificateMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err 
} + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go index 5659a47d1..a944a7029 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go @@ -55,6 +55,11 @@ type ModifyVpnTunnelOptionsInput struct { // UnauthorizedOperation . DryRun *bool + // Specifies the storage mode for the pre-shared key (PSK). Valid values are + // Standard (stored in Site-to-Site VPN service) or SecretsManager (stored in + // Amazon Web Services Secrets Manager). + PreSharedKeyStorage *string + // Choose whether or not to trigger immediate tunnel replacement. This is only // applicable when turning on or off EnableTunnelLifecycleControl . 
// @@ -163,6 +168,36 @@ func (c *Client) addOperationModifyVpnTunnelOptionsMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go index 9c1fc3efc..e895ce20e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go @@ -148,6 +148,36 @@ func (c *Client) addOperationMonitorInstancesMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go index d48870507..ebf319ea2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go @@ -16,10 +16,9 @@ import ( // Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC // platform. The Elastic IP address must be allocated to your account for more than // 24 hours, and it must not be associated with an instance. After the Elastic IP -// address is moved, it is no longer available for use in the EC2-Classic platform, -// unless you move it back using the RestoreAddressToClassicrequest. You cannot move an Elastic IP -// address that was originally allocated for use in the EC2-VPC platform to the -// EC2-Classic platform. 
+// address is moved, it is no longer available for use in the EC2-Classic platform. +// You cannot move an Elastic IP address that was originally allocated for use in +// the EC2-VPC platform to the EC2-Classic platform. func (c *Client) MoveAddressToVpc(ctx context.Context, params *MoveAddressToVpcInput, optFns ...func(*Options)) (*MoveAddressToVpcOutput, error) { if params == nil { params = &MoveAddressToVpcInput{} @@ -153,6 +152,36 @@ func (c *Client) addOperationMoveAddressToVpcMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go index 9820d6159..9677f4605 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go @@ -159,6 +159,36 @@ func (c *Client) addOperationMoveByoipCidrToIpamMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go index 831794af7..63ee961b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go @@ -184,6 +184,36 @@ func (c *Client) addOperationMoveCapacityReservationInstancesMiddlewares(stack * if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go index ebb73589d..3906c0769 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go @@ -14,7 +14,7 @@ import ( // Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services // resources through bring your own IP addresses (BYOIP) and creates a // corresponding address pool. After the address range is provisioned, it is ready -// to be advertised using AdvertiseByoipCidr. +// to be advertised. // // Amazon Web Services verifies that you own the address range and are authorized // to advertise it. You must ensure that the address range is registered to you and @@ -24,12 +24,10 @@ import ( // // Provisioning an address range is an asynchronous operation, so the call returns // immediately, but the address range is not ready to use until its status changes -// from pending-provision to provisioned . To monitor the status of an address -// range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress -// with either the specific address from the address pool or the ID of the address -// pool. +// from pending-provision to provisioned . For more information, see [Onboard your address range]. 
// // [Bring your own IP addresses (BYOIP)]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html +// [Onboard your address range]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/byoip-onboard.html func (c *Client) ProvisionByoipCidr(ctx context.Context, params *ProvisionByoipCidrInput, optFns ...func(*Options)) (*ProvisionByoipCidrOutput, error) { if params == nil { params = &ProvisionByoipCidrInput{} @@ -203,6 +201,36 @@ func (c *Client) addOperationProvisionByoipCidrMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go index e07342273..4891914ba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go @@ -157,6 +157,36 @@ func (c *Client) addOperationProvisionIpamByoasnMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go index 85e722c61..fff25540d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go @@ -182,6 +182,36 @@ func (c *Client) addOperationProvisionIpamPoolCidrMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go index 03dac86a6..692d8686a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go @@ -168,6 +168,36 @@ func (c *Client) addOperationProvisionPublicIpv4PoolCidrMiddlewares(stack *middl if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go index a935ba93f..6b1a1a2d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go @@ -55,6 +55,9 @@ type PurchaseCapacityBlockInput struct { type PurchaseCapacityBlockOutput struct { + // The Capacity Block. + CapacityBlocks []types.CapacityBlock + // The Capacity Reservation. CapacityReservation *types.CapacityReservation @@ -152,6 +155,36 @@ func (c *Client) addOperationPurchaseCapacityBlockMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go index 36a59a061..0bf4baeb1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go @@ -148,6 +148,36 @@ func (c *Client) addOperationPurchaseCapacityBlockExtensionMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go index eeb6c7320..b274602f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go @@ -181,6 +181,36 @@ func (c *Client) addOperationPurchaseHostReservationMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go index 08fef06a1..7a94411d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go @@ -175,6 +175,36 @@ func (c *Client) addOperationPurchaseReservedInstancesOfferingMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go index 0a7a12d9d..5586faa44 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go @@ -163,6 +163,36 @@ func (c *Client) addOperationPurchaseScheduledInstancesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go index 4535284b0..6c1c9f343 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go @@ -147,6 +147,36 @@ func (c *Client) addOperationRebootInstancesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go index d055e7e26..3a4e73f49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go @@ -15,10 +15,6 @@ import ( // registering the AMI is the final step in the creation process. For more // information about creating AMIs, see [Create an AMI from a snapshot]and [Create an instance-store backed AMI] in the Amazon EC2 User Guide. // -// For Amazon EBS-backed instances, CreateImage creates and registers the AMI in a single -// request, so you don't have to register the AMI yourself. We recommend that you -// always use CreateImageunless you have a specific reason to use RegisterImage. -// // If needed, you can deregister an AMI at any time. Any modifications you make to // an AMI backed by an instance store volume invalidates its registration. If you // make changes to an image, deregister the previous image and register the new @@ -32,7 +28,7 @@ import ( // mapping. If the snapshot is encrypted, or encryption by default is enabled, the // root volume of an instance launched from the AMI is encrypted. // -// For more information, see [Create an AMI from a snapshot] and [Use encryption with Amazon EBS-backed AMIs] in the Amazon EC2 User Guide. +// For more information, see [Create an AMI from a snapshot] and [Use encryption with EBS-backed AMIs] in the Amazon EC2 User Guide. // // # Amazon Web Services Marketplace product codes // @@ -48,22 +44,22 @@ import ( // PlatformDetails field on the new AMI. If the field is empty or doesn't match the // expected operating system code (for example, Windows, RedHat, SUSE, or SQL), the // AMI creation was unsuccessful, and you should discard the AMI and instead create -// the AMI from an instance using CreateImage. For more information, see [Create an AMI from an instance] in the Amazon EC2 -// User Guide. +// the AMI from an instance. 
For more information, see [Create an AMI from an instance]in the Amazon EC2 User +// Guide. // // If you purchase a Reserved Instance to apply to an On-Demand Instance that was // launched from an AMI with a billing product code, make sure that the Reserved // Instance has the matching billing product code. If you purchase a Reserved -// Instance without the matching billing product code, the Reserved Instance will -// not be applied to the On-Demand Instance. For information about how to obtain -// the platform details and billing information of an AMI, see [Understand AMI billing information]in the Amazon EC2 -// User Guide. +// Instance without the matching billing product code, the Reserved Instance is not +// applied to the On-Demand Instance. For information about how to obtain the +// platform details and billing information of an AMI, see [Understand AMI billing information]in the Amazon EC2 User +// Guide. // +// [Use encryption with EBS-backed AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html // [Understand AMI billing information]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html // [Create an instance-store backed AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-instance-store.html // [Create an AMI from an instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#how-to-create-ebs-ami // [Create an AMI from a snapshot]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot -// [Use encryption with Amazon EBS-backed AMIs]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html func (c *Client) RegisterImage(ctx context.Context, params *RegisterImageInput, optFns ...func(*Options)) (*RegisterImageOutput, error) { if params == nil { params = &RegisterImageInput{} @@ -103,11 +99,11 @@ type RegisterImageInput struct { // If your account is not authorized to specify billing product codes, you can // publish AMIs that include billable software and list them on the Amazon Web // Services Marketplace. You must first register as a seller on the Amazon Web - // Services Marketplace. For more information, see [Getting started as a seller]and [AMI-based products] in the Amazon Web Services + // Services Marketplace. For more information, see [Getting started as an Amazon Web Services Marketplace seller]and [AMI-based products in Amazon Web Services Marketplace] in the Amazon Web Services // Marketplace Seller Guide. // - // [Getting started as a seller]: https://docs.aws.amazon.com/marketplace/latest/userguide/user-guide-for-sellers.html - // [AMI-based products]: https://docs.aws.amazon.com/marketplace/latest/userguide/ami-products.html + // [AMI-based products in Amazon Web Services Marketplace]: https://docs.aws.amazon.com/marketplace/latest/userguide/ami-products.html + // [Getting started as an Amazon Web Services Marketplace seller]: https://docs.aws.amazon.com/marketplace/latest/userguide/user-guide-for-sellers.html BillingProducts []string // The block device mapping entries. @@ -118,9 +114,9 @@ type RegisterImageInput struct { // If you create an AMI on an Outpost, then all backing snapshots must be on the // same Outpost or in the Region of that Outpost. AMIs on an Outpost that include // local snapshots can be used to launch instances on the same Outpost only. For - // more information, [Amazon EBS local snapshots on Outposts]in the Amazon EBS User Guide. 
+ // more information, [Create AMIs from local snapshots]in the Amazon EBS User Guide. // - // [Amazon EBS local snapshots on Outposts]: https://docs.aws.amazon.com/ebs/latest/userguide/snapshots-outposts.html#ami + // [Create AMIs from local snapshots]: https://docs.aws.amazon.com/ebs/latest/userguide/snapshots-outposts.html#ami BlockDeviceMappings []types.BlockDeviceMapping // The boot mode of the AMI. A value of uefi-preferred indicates that the AMI @@ -129,9 +125,9 @@ type RegisterImageInput struct { // The operating system contained in the AMI must be configured to support the // specified boot mode. // - // For more information, see [Boot modes] in the Amazon EC2 User Guide. + // For more information, see [Instance launch behavior with Amazon EC2 boot modes] in the Amazon EC2 User Guide. // - // [Boot modes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html + // [Instance launch behavior with Amazon EC2 boot modes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html BootMode types.BootModeValues // A description for your AMI. @@ -152,10 +148,10 @@ type RegisterImageInput struct { // The full path to your AMI manifest in Amazon S3 storage. The specified bucket // must have the aws-exec-read canned access control list (ACL) to ensure that it - // can be accessed by Amazon EC2. For more information, see [Canned ACLs]in the Amazon S3 + // can be accessed by Amazon EC2. For more information, see [Canned ACL]in the Amazon S3 // Service Developer Guide. // - // [Canned ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl ImageLocation *string // Set to v2.0 to indicate that IMDSv2 is specified in the AMI. Instances launched @@ -206,9 +202,9 @@ type RegisterImageInput struct { // Base64 representation of the non-volatile UEFI variable store. To retrieve the // UEFI data, use the [GetInstanceUefiData]command. You can inspect and modify the UEFI data by using - // the [python-uefivars tool]on GitHub. For more information, see [UEFI Secure Boot] in the Amazon EC2 User Guide. + // the [python-uefivars tool]on GitHub. For more information, see [UEFI Secure Boot for Amazon EC2 instances] in the Amazon EC2 User Guide. 
// - // [UEFI Secure Boot]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html + // [UEFI Secure Boot for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/uefi-secure-boot.html // [GetInstanceUefiData]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceUefiData // [python-uefivars tool]: https://github.com/awslabs/python-uefivars UefiData *string @@ -321,6 +317,36 @@ func (c *Client) addOperationRegisterImageMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go index 8b75f8e1a..545074b37 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go @@ -147,6 +147,36 @@ func (c *Client) addOperationRegisterInstanceEventNotificationAttributesMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go index e4023f878..18b1079d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go @@ 
-160,6 +160,36 @@ func (c *Client) addOperationRegisterTransitGatewayMulticastGroupMembersMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go index 139c3f445..e436256ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go @@ -162,6 +162,36 @@ func (c *Client) addOperationRegisterTransitGatewayMulticastGroupSourcesMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go index e81596144..1468b0837 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go @@ -144,6 +144,36 @@ func (c *Client) addOperationRejectCapacityReservationBillingOwnershipMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if 
err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go index fa6546e94..719ba1adc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go @@ -144,6 +144,36 @@ func (c *Client) addOperationRejectTransitGatewayMulticastDomainAssociationsMidd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go index 6d1f1b8d4..23fc6a63b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go @@ -142,6 +142,36 @@ func (c *Client) addOperationRejectTransitGatewayPeeringAttachmentMiddlewares(st if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err 
= addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go index 7bbe41ef9..8f65a2993 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go @@ -145,6 +145,36 @@ func (c *Client) addOperationRejectTransitGatewayVpcAttachmentMiddlewares(stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go index c869dae06..cfcc96c91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go @@ -147,6 +147,36 @@ func (c *Client) addOperationRejectVpcEndpointConnectionsMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go index 16313c5ce..4982f4569 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go @@ -144,6 +144,36 @@ func (c *Client) addOperationRejectVpcPeeringConnectionMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go index 5adfacaac..ce56eb27d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go @@ -13,12 +13,11 @@ import ( // Releases the specified Elastic IP address. // // [Default VPC] Releasing an Elastic IP address automatically disassociates it -// from any instance that it's associated with. To disassociate an Elastic IP -// address without releasing it, use DisassociateAddress. +// from any instance that it's associated with. Alternatively, you can disassociate +// an Elastic IP address without releasing it. // -// [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic IP address before -// you can release it. Otherwise, Amazon EC2 returns an error ( -// InvalidIPAddress.InUse ). +// [Nondefault VPC] You must disassociate the Elastic IP address before you can +// release it. Otherwise, Amazon EC2 returns an error ( InvalidIPAddress.InUse ). // // After releasing an Elastic IP address, it is released to the IP address pool. // Be sure to update your DNS records and any servers or devices that communicate @@ -27,7 +26,9 @@ import ( // allocated to another Amazon Web Services account. // // After you release an Elastic IP address, you might be able to recover it. For -// more information, see AllocateAddress. +// more information, see [Release an Elastic IP address]. 
+// +// [Release an Elastic IP address]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing-eips-releasing.html func (c *Client) ReleaseAddress(ctx context.Context, params *ReleaseAddressInput, optFns ...func(*Options)) (*ReleaseAddressOutput, error) { if params == nil { params = &ReleaseAddressInput{} @@ -159,6 +160,36 @@ func (c *Client) addOperationReleaseAddressMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go index 212b99180..488254edf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go @@ -150,6 +150,36 @@ func (c *Client) addOperationReleaseHostsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go index a063b06a9..075979ad0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go @@ -162,6 +162,36 @@ func (c *Client) addOperationReleaseIpamPoolAllocationMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = 
addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go index c7e6be166..9de0cb2d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go @@ -145,6 +145,36 @@ func (c *Client) addOperationReplaceIamInstanceProfileAssociationMiddlewares(sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go index e3eb32c73..cecfee4ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go @@ -147,6 +147,36 @@ func (c *Client) addOperationReplaceImageCriteriaInAllowedImagesSettingsMiddlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = 
addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go index 5b34fd45a..faed0df1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go @@ -153,6 +153,36 @@ func (c *Client) addOperationReplaceNetworkAclAssociationMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go index 0d1d256b4..79e509968 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go @@ -185,6 +185,36 @@ func (c *Client) addOperationReplaceNetworkAclEntryMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go index 
84db6836c..49050374a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go @@ -85,6 +85,9 @@ type ReplaceRouteInput struct { // The ID of a network interface. NetworkInterfaceId *string + // The Amazon Resource Name (ARN) of the ODB network. + OdbNetworkArn *string + // The ID of a transit gateway. TransitGatewayId *string @@ -192,6 +195,36 @@ func (c *Client) addOperationReplaceRouteMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go index fb79fc094..72568bee3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go @@ -159,6 +159,36 @@ func (c *Client) addOperationReplaceRouteTableAssociationMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go index 06c369180..f1f41b649 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go @@ -154,6 +154,36 @@ func (c *Client) addOperationReplaceTransitGatewayRouteMiddlewares(stack *middle if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go index 84bed6dc2..b26758149 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go @@ -149,6 +149,36 @@ func (c *Client) addOperationReplaceVpnTunnelMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go index 2172db87f..fd10a1856 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go @@ -186,6 +186,36 @@ func (c *Client) addOperationReportInstanceStatusMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, 
options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go index 5db4d2505..0c2f7767f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go @@ -174,6 +174,36 @@ func (c *Client) addOperationRequestSpotFleetMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go index f0abba7ab..c973de95a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go @@ -242,6 +242,36 @@ func (c *Client) addOperationRequestSpotInstancesMiddlewares(stack *middleware.S if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go index 97839a172..84074d421 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go @@ -149,6 +149,36 @@ func (c *Client) addOperationResetAddressAttributeMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go index 70613ad59..6bfffda39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go @@ -141,6 +141,36 @@ func (c *Client) addOperationResetEbsDefaultKmsKeyIdMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go index cce50b258..6452560a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go @@ -146,6 +146,36 @@ func (c *Client) addOperationResetFpgaImageAttributeMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + 
return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go index 4782499b2..c1e721577 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go @@ -145,6 +145,36 @@ func (c *Client) addOperationResetImageAttributeMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go index dfbae0e39..5a4abd5a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go @@ -154,6 +154,36 @@ func (c *Client) addOperationResetInstanceAttributeMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != 
nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go index 741d64102..2b9177c2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go @@ -142,6 +142,36 @@ func (c *Client) addOperationResetNetworkInterfaceAttributeMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go index b44d4b725..911a49380 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go @@ -149,6 +149,36 @@ func (c *Client) addOperationResetSnapshotAttributeMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go index afdf13c84..056c2b499 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go @@ -150,6 +150,36 @@ func (c *Client) 
addOperationRestoreAddressToClassicMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go index 1e80d93f9..77cadf27c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go @@ -10,10 +10,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Restores an AMI from the Recycle Bin. For more information, see [Recycle Bin] in the Amazon +// Restores an AMI from the Recycle Bin. For more information, see [Recover deleted Amazon EBS snapshots and EBS-back AMIs with Recycle Bin] in the Amazon // EC2 User Guide. 
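// NOTE: editorial sketch, not part of the vendored patch. The hunks above repeat
// one block: every generated addOperation*Middlewares function now registers ten
// interceptor hooks (before the retry loop, per attempt, execution, serialization,
// signing, transmit, deserialization) and returns on the first registration error.
// A condensed, hypothetical version of that pattern is shown below. It assumes the
// ec2 package's internal add* helpers and Options type, plus the
// "github.com/aws/smithy-go/middleware" import used by the generated code; the
// helper name addAllInterceptors is invented for illustration.
func addAllInterceptors(stack *middleware.Stack, options Options) error {
	// Order mirrors the generated code; each helper wires one hook into the stack.
	adders := []func(*middleware.Stack, Options) error{
		addInterceptBeforeRetryLoop,
		addInterceptAttempt,
		addInterceptExecution,
		addInterceptBeforeSerialization,
		addInterceptAfterSerialization,
		addInterceptBeforeSigning,
		addInterceptAfterSigning,
		addInterceptTransmit,
		addInterceptBeforeDeserialization,
		addInterceptAfterDeserialization,
	}
	for _, add := range adders {
		if err := add(stack, options); err != nil {
			return err
		}
	}
	return nil
}
// The generated files inline this sequence per operation instead, which keeps each
// file self-contained at the cost of repetition across the whole patch.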
// -// [Recycle Bin]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html +// [Recover deleted Amazon EBS snapshots and EBS-back AMIs with Recycle Bin]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recycle-bin.html func (c *Client) RestoreImageFromRecycleBin(ctx context.Context, params *RestoreImageFromRecycleBinInput, optFns ...func(*Options)) (*RestoreImageFromRecycleBinOutput, error) { if params == nil { params = &RestoreImageFromRecycleBinInput{} @@ -144,6 +144,36 @@ func (c *Client) addOperationRestoreImageFromRecycleBinMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go index e712cecd9..94f1ac400 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go @@ -153,6 +153,36 @@ func (c *Client) addOperationRestoreManagedPrefixListVersionMiddlewares(stack *m if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go index 5e5e0e28c..ea1bf987d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go @@ -179,6 +179,36 @@ func (c *Client) 
addOperationRestoreSnapshotFromRecycleBinMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go index 159304187..d8cdbcea6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go @@ -173,6 +173,36 @@ func (c *Client) addOperationRestoreSnapshotTierMiddlewares(stack *middleware.St if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go index 7a287609a..e65c1e244 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go @@ -158,6 +158,36 @@ func (c *Client) addOperationRevokeClientVpnIngressMiddlewares(stack *middleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go index 4b95eae30..bed2fc71a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go @@ -197,6 +197,36 @@ func (c *Client) addOperationRevokeSecurityGroupEgressMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go index 0d29a58a2..ec48dba0e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go @@ -208,6 +208,36 @@ func (c *Client) addOperationRevokeSecurityGroupIngressMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go index fb3d480ae..98ec2ee6c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go @@ -125,11 +125,11 @@ type RunInstancesInput struct { // request. If you do not specify a client token, a randomly generated token is // used for the request to ensure idempotency. // - // For more information, see [Ensuring Idempotency]. + // For more information, see [Ensuring idempotency in Amazon EC2 API requests]. // // Constraints: Maximum 64 ASCII characters // - // [Ensuring Idempotency]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html + // [Ensuring idempotency in Amazon EC2 API requests]: https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html ClientToken *string // The CPU options for the instance. For more information, see [Optimize CPU options] in the Amazon EC2 @@ -151,9 +151,9 @@ type RunInstancesInput struct { CreditSpecification *types.CreditSpecificationRequest // Indicates whether an instance is enabled for stop protection. For more - // information, see [Stop protection]. + // information, see [Enable stop protection for your EC2 instances]. // - // [Stop protection]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html#Using_StopProtection + // [Enable stop protection for your EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html DisableApiStop *bool // Indicates whether termination protection is enabled for the instance. The @@ -181,11 +181,17 @@ type RunInstancesInput struct { // An elastic GPU to associate with the instance. // // Amazon Elastic Graphics reached end of life on January 8, 2024. + // + // Deprecated: Specifying Elastic Graphics accelerators is no longer supported on + // the RunInstances API. ElasticGpuSpecification []types.ElasticGpuSpecification // An elastic inference accelerator to associate with the instance. // // Amazon Elastic Inference is no longer available. + // + // Deprecated: Specifying Elastic Inference accelerators is no longer supported on + // the RunInstances API. ElasticInferenceAccelerators []types.ElasticInferenceAccelerator // If you’re launching an instance into a dual-stack or IPv6-only subnet, you can @@ -204,13 +210,12 @@ type RunInstancesInput struct { EnablePrimaryIpv6 *bool // Indicates whether the instance is enabled for Amazon Web Services Nitro - // Enclaves. For more information, see [What is Amazon Web Services Nitro Enclaves?]in the Amazon Web Services Nitro Enclaves - // User Guide. + // Enclaves. For more information, see [Amazon Web Services Nitro Enclaves User Guide]. // // You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same // instance. // - // [What is Amazon Web Services Nitro Enclaves?]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html + // [Amazon Web Services Nitro Enclaves User Guide]: https://docs.aws.amazon.com/enclaves/latest/user/ EnclaveOptions *types.EnclaveOptionsRequest // Indicates whether an instance is enabled for hibernation. This parameter is @@ -243,9 +248,9 @@ type RunInstancesInput struct { // InstanceInterruptionBehavior is set to either hibernate or stop . InstanceMarketOptions *types.InstanceMarketOptionsRequest - // The instance type. 
For more information, see [Amazon EC2 instance types] in the Amazon EC2 User Guide. + // The instance type. For more information, see [Amazon EC2 Instance Types Guide]. // - // [Amazon EC2 instance types]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html + // [Amazon EC2 Instance Types Guide]: https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-types.html InstanceType types.InstanceType // The number of IPv6 addresses to associate with the primary network interface. @@ -275,13 +280,12 @@ type RunInstancesInput struct { // [PV-GRUB]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html KernelId *string - // The name of the key pair. You can create a key pair using [CreateKeyPair] or [ImportKeyPair]. + // The name of the key pair. For more information, see [Create a key pair for your EC2 instance]. // // If you do not specify a key pair, you can't connect to the instance unless you // choose an AMI that is configured to allow users another way to log in. // - // [ImportKeyPair]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html - // [CreateKeyPair]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html + // [Create a key pair for your EC2 instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-key-pairs.html KeyName *string // The launch template. Any additional parameters that you specify for the new @@ -294,9 +298,9 @@ type RunInstancesInput struct { // The maintenance and recovery options for the instance. MaintenanceOptions *types.InstanceMaintenanceOptionsRequest - // The metadata options for the instance. For more information, see [Instance metadata and user data]. + // The metadata options for the instance. For more information, see [Configure the Instance Metadata Service options]. // - // [Instance metadata and user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + // [Configure the Instance Metadata Service options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html MetadataOptions *types.InstanceMetadataOptionsRequest // Specifies whether detailed monitoring is enabled for the instance. @@ -342,12 +346,10 @@ type RunInstancesInput struct { // [PV-GRUB]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html RamdiskId *string - // The IDs of the security groups. You can create a security group using [CreateSecurityGroup]. + // The IDs of the security groups. // // If you specify a network interface, you must specify any security groups as // part of the network interface instead of using this parameter. - // - // [CreateSecurityGroup]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateSecurityGroup.html SecurityGroupIds []string // [Default VPC] The names of the security groups. @@ -383,9 +385,9 @@ type RunInstancesInput struct { // The user data to make available to the instance. User data must be // base64-encoded. Depending on the tool or SDK that you're using, the - // base64-encoding might be performed for you. For more information, see [Work with instance user data]. + // base64-encoding might be performed for you. For more information, see [Run commands at launch using instance user data]. 
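// NOTE: editorial sketch, not part of the vendored patch. It shows how a caller
// might exercise some of the RunInstancesInput fields whose documentation is
// touched above (ClientToken, InstanceType, KeyName, MetadataOptions). ImageId,
// MinCount, and MaxCount are not part of this hunk and are assumed from the
// existing public API; the AMI ID and key pair name are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	out, err := client.RunInstances(ctx, &ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI ID
		InstanceType: types.InstanceTypeT3Micro,
		MinCount:     aws.Int32(1),
		MaxCount:     aws.Int32(1),
		KeyName:      aws.String("my-key-pair"),              // placeholder key pair name
		ClientToken:  aws.String("example-idempotency-token"), // ensures idempotent retries
		MetadataOptions: &types.InstanceMetadataOptionsRequest{
			HttpTokens: types.HttpTokensStateRequired, // require IMDSv2
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range out.Instances {
		log.Printf("launched %s", aws.ToString(inst.InstanceId))
	}
}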
// - // [Work with instance user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html + // [Run commands at launch using instance user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html UserData *string noSmithyDocumentSerde @@ -509,6 +511,36 @@ func (c *Client) addOperationRunInstancesMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go index 115a5ea02..ee00888d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go @@ -172,6 +172,36 @@ func (c *Client) addOperationRunScheduledInstancesMiddlewares(stack *middleware. 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go index ce524019c..8d66be9c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go @@ -175,6 +175,36 @@ func (c *Client) addOperationSearchLocalGatewayRoutesMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go index bbb9cdef5..7a621948f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go @@ -178,6 +178,36 @@ func (c *Client) addOperationSearchTransitGatewayMulticastGroupsMiddlewares(stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = 
addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go index d74b9d0eb..5566aecc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go @@ -181,6 +181,36 @@ func (c *Client) addOperationSearchTransitGatewayRoutesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go index 564dea3bd..cc690b117 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go @@ -154,6 +154,36 @@ func (c *Client) addOperationSendDiagnosticInterruptMiddlewares(stack *middlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go index e2bc45b3d..3053a0a0f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go @@ -21,9 +21,9 @@ import ( // (OU) or root (the entire Amazon Web Services Organization). // // The report is saved to your specified S3 bucket, using the following path -// structure (with the italicized placeholders representing your specific values): +// structure (with the capitalized placeholders representing your specific values): // -// s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv +// s3://AMZN-S3-DEMO-BUCKET/YOUR-OPTIONAL-S3-PREFIX/ec2_TARGETID_REPORTID_YYYYMMDDTHHMMZ.csv // // Prerequisites for generating a report // @@ -33,7 +33,7 @@ import ( // - An S3 bucket must be available before generating the report (you can create // a new one or use an existing one), it must be in the same Region where the // report generation request is made, and it must have an appropriate bucket -// policy. For a sample S3 policy, see Sample Amazon S3 policy under . +// policy. For a sample S3 policy, see Sample Amazon S3 policy under [Examples]. // // - Trusted access must be enabled for the service for which the declarative // policy will enforce a baseline configuration. If you use the Amazon Web Services @@ -51,6 +51,7 @@ import ( // // [Generating the account status report for declarative policies]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_declarative_status-report.html // [Using Organizations with other Amazon Web Services services]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html +// [Examples]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StartDeclarativePoliciesReport.html#API_StartDeclarativePoliciesReport_Examples func (c *Client) StartDeclarativePoliciesReport(ctx context.Context, params *StartDeclarativePoliciesReportInput, optFns ...func(*Options)) (*StartDeclarativePoliciesReportOutput, error) { if params == nil { params = &StartDeclarativePoliciesReportInput{} @@ -201,6 +202,36 @@ func (c *Client) addOperationStartDeclarativePoliciesReportMiddlewares(stack *mi if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go index c1b55560c..bc7ef853b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go @@ -168,6 +168,36 @@ func (c *Client) addOperationStartInstancesMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go index dab841f97..3a0ccd138 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go @@ -156,6 +156,36 @@ func (c *Client) addOperationStartNetworkInsightsAccessScopeAnalysisMiddlewares( if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go index 53918d51f..d7d3a86c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go @@ -166,6 +166,36 @@ func (c *Client) addOperationStartNetworkInsightsAnalysisMiddlewares(stack *midd if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, 
options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go index 8ee0dff8b..9e6f5342e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go @@ -148,6 +148,36 @@ func (c *Client) addOperationStartVpcEndpointServicePrivateDnsVerificationMiddle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go index 674e95ea8..2af8abb09 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go @@ -11,22 +11,24 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Stops an Amazon EBS-backed instance. For more information, see [Stop and start Amazon EC2 instances] in the Amazon -// EC2 User Guide. +// Stops an Amazon EBS-backed instance. You can restart your instance at any time +// using the [StartInstances]API. For more information, see [Stop and start Amazon EC2 instances] in the Amazon EC2 User Guide. // -// When you stop an instance, we shut it down. You can restart your instance at -// any time. +// When you stop or hibernate an instance, we shut it down. By default, this +// includes a graceful operating system (OS) shutdown. 
To bypass the graceful +// shutdown, use the skipOsShutdown parameter; however, this might risk data +// integrity. // -// You can use the Stop operation together with the Hibernate parameter to -// hibernate an instance if the instance is [enabled for hibernation]and meets the [hibernation prerequisites]. Stopping an instance -// doesn't preserve data stored in RAM, while hibernation does. If hibernation -// fails, a normal shutdown occurs. For more information, see [Hibernate your Amazon EC2 instance]in the Amazon EC2 -// User Guide. +// You can use the StopInstances operation together with the Hibernate parameter +// to hibernate an instance if the instance is [enabled for hibernation]and meets the [hibernation prerequisites]. Stopping an +// instance doesn't preserve data stored in RAM, while hibernation does. If +// hibernation fails, a normal shutdown occurs. For more information, see [Hibernate your Amazon EC2 instance]in the +// Amazon EC2 User Guide. // // If your instance appears stuck in the stopping state, there might be an issue -// with the underlying host computer. You can use the Stop operation together with -// the Force parameter to force stop your instance. For more information, see [Troubleshoot Amazon EC2 instance stop issues]in -// the Amazon EC2 User Guide. +// with the underlying host computer. You can use the StopInstances operation +// together with the Force parameter to force stop your instance. For more +// information, see [Troubleshoot Amazon EC2 instance stop issues]in the Amazon EC2 User Guide. // // Stopping and hibernating an instance differs from rebooting or terminating it. // For example, a stopped or hibernated instance retains its root volume and any @@ -48,6 +50,7 @@ import ( // [Hibernate your Amazon EC2 instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html // [Amazon EC2 instance state changes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html // [enabled for hibernation]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enabling-hibernation.html +// [StartInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_StartInstances.html // [hibernation prerequisites]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html func (c *Client) StopInstances(ctx context.Context, params *StopInstancesInput, optFns ...func(*Options)) (*StopInstancesOutput, error) { if params == nil { @@ -93,13 +96,23 @@ type StopInstancesInput struct { // Hibernates the instance if the instance was enabled for hibernation at launch. // If the instance cannot hibernate successfully, a normal shutdown occurs. For - // more information, see [Hibernate your instance]in the Amazon EC2 User Guide. + // more information, see [Hibernate your Amazon EC2 instance]in the Amazon EC2 User Guide. // // Default: false // - // [Hibernate your instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html + // [Hibernate your Amazon EC2 instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html Hibernate *bool + // Specifies whether to bypass the graceful OS shutdown process when the instance + // is stopped. + // + // Bypassing the graceful OS shutdown might result in data loss or corruption (for + // example, memory contents not flushed to disk or loss of in-flight IOs) or + // skipped shutdown scripts. 
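// NOTE: editorial sketch, not part of the vendored patch. It exercises the new
// SkipOsShutdown field on StopInstancesInput documented above; InstanceIds comes
// from the existing public API and the instance ID is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Stop the instance and bypass the graceful OS shutdown. As the field
	// documentation above warns, skipping the OS shutdown risks data loss or
	// corruption, so it is opt-in and defaults to false.
	out, err := client.StopInstances(ctx, &ec2.StopInstancesInput{
		InstanceIds:    []string{"i-0123456789abcdef0"}, // placeholder instance ID
		SkipOsShutdown: aws.Bool(true),
		// Hibernate: aws.Bool(true), // only if the instance is enabled for hibernation
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, change := range out.StoppingInstances {
		log.Printf("%s: %s -> %s",
			aws.ToString(change.InstanceId),
			change.PreviousState.Name,
			change.CurrentState.Name)
	}
}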
+ // + // Default: false + SkipOsShutdown *bool + noSmithyDocumentSerde } @@ -202,6 +215,36 @@ func (c *Client) addOperationStopInstancesMiddlewares(stack *middleware.Stack, o if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go index 02c9fc6c7..5ed5c2681 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go @@ -158,6 +158,36 @@ func (c *Client) addOperationTerminateClientVpnConnectionsMiddlewares(stack *mid if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go index 2331b48a0..af7e435b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go @@ -53,6 +53,10 @@ import ( // By default, Amazon EC2 deletes all EBS volumes that were attached when the // instance launched. Volumes attached after instance launch continue running. // +// By default, the TerminateInstances operation includes a graceful operating +// system (OS) shutdown. To bypass the graceful shutdown, use the skipOsShutdown +// parameter; however, this might risk data integrity. +// // You can stop, start, and terminate EBS-backed instances. 
You can only terminate // instance store-backed instances. What happens to an instance differs if you stop // or terminate it. For example, when you stop an instance, the root device and any @@ -62,11 +66,16 @@ import ( // differences between stopping and terminating instances, see [Amazon EC2 instance state changes]in the Amazon EC2 // User Guide. // -// For information about troubleshooting, see [Troubleshooting terminating your instance] in the Amazon EC2 User Guide. +// When you terminate an instance, we attempt to terminate it forcibly after a +// short while. If your instance appears stuck in the shutting-down state after a +// period of time, there might be an issue with the underlying host computer. For +// more information about terminating and troubleshooting terminating your +// instances, see [Terminate Amazon EC2 instances]and [Troubleshooting terminating your instance] in the Amazon EC2 User Guide. // // [idempotent]: https://docs.aws.amazon.com/ec2/latest/devguide/ec2-api-idempotency.html // [Troubleshooting terminating your instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html // [Amazon EC2 instance state changes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html +// [Terminate Amazon EC2 instances]: https://docs.aws.amazon.com/ func (c *Client) TerminateInstances(ctx context.Context, params *TerminateInstancesInput, optFns ...func(*Options)) (*TerminateInstancesOutput, error) { if params == nil { params = &TerminateInstancesInput{} @@ -98,6 +107,18 @@ type TerminateInstancesInput struct { // UnauthorizedOperation . DryRun *bool + // Forces the instances to terminate. The instance will first attempt a graceful + // shutdown, which includes flushing file system caches and metadata. If the + // graceful shutdown fails to complete within the timeout period, the instance + // shuts down forcibly without flushing the file system caches and metadata. + Force *bool + + // Specifies whether to bypass the graceful OS shutdown process when the instance + // is terminated. 
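// NOTE: editorial sketch, not part of the vendored patch. It exercises the new
// Force and SkipOsShutdown fields on TerminateInstancesInput documented above;
// InstanceIds comes from the existing public API and the instance ID is a
// placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Force terminate: per the Force field docs above, the instance first
	// attempts a graceful shutdown and is shut down forcibly if that does not
	// complete within the timeout period.
	out, err := client.TerminateInstances(ctx, &ec2.TerminateInstancesInput{
		InstanceIds: []string{"i-0123456789abcdef0"}, // placeholder instance ID
		Force:       aws.Bool(true),
		// SkipOsShutdown: aws.Bool(true), // bypass the graceful OS shutdown entirely
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, change := range out.TerminatingInstances {
		log.Printf("%s is now %s",
			aws.ToString(change.InstanceId),
			change.CurrentState.Name)
	}
}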
+ // + // Default: false + SkipOsShutdown *bool + noSmithyDocumentSerde } @@ -200,6 +221,36 @@ func (c *Client) addOperationTerminateInstancesMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go index 9391b4ea7..af5a3796e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go @@ -148,6 +148,36 @@ func (c *Client) addOperationUnassignIpv6AddressesMiddlewares(stack *middleware. if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go index 5e0947232..84b41051b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go @@ -140,6 +140,36 @@ func (c *Client) addOperationUnassignPrivateIpAddressesMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + 
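// --- Example (editor's sketch, not part of the vendored diff) -----------------
// The StopInstances/TerminateInstances hunks above add two optional input
// fields, Force and SkipOsShutdown. The sketch below shows how a caller might
// set them; it assumes standard aws-sdk-go-v2 client setup, and the instance
// ID is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	out, err := client.TerminateInstances(context.TODO(), &ec2.TerminateInstancesInput{
		InstanceIds:    []string{"i-0123456789abcdef0"}, // placeholder instance ID
		SkipOsShutdown: aws.Bool(true),                  // bypass the graceful OS shutdown described above
		// Force: aws.Bool(true),                        // optionally force termination if graceful shutdown stalls
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, change := range out.TerminatingInstances {
		log.Printf("terminating %s", aws.ToString(change.InstanceId))
	}
}
// --- End example ---------------------------------------------------------------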
return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go index bda920aef..7696ab020 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go @@ -168,6 +168,36 @@ func (c *Client) addOperationUnassignPrivateNatGatewayAddressMiddlewares(stack * if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go index f012a89ca..7b9a8f373 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go @@ -143,6 +143,36 @@ func (c *Client) addOperationUnlockSnapshotMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != 
nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go index 35e27ca55..e789daa82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go @@ -145,6 +145,36 @@ func (c *Client) addOperationUnmonitorInstancesMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go index 690480f66..6ebaa3ee7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go @@ -154,6 +154,36 @@ func (c *Client) addOperationUpdateSecurityGroupRuleDescriptionsEgressMiddleware if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go index f2ee39e81..844c04f9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go @@ -155,6 +155,36 @@ func (c *Client) 
addOperationUpdateSecurityGroupRuleDescriptionsIngressMiddlewar if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go index abb6d6536..bd9a6c45f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go @@ -148,6 +148,36 @@ func (c *Client) addOperationWithdrawByoipCidrMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addInterceptBeforeRetryLoop(stack, options); err != nil { + return err + } + if err = addInterceptAttempt(stack, options); err != nil { + return err + } + if err = addInterceptExecution(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSerialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterSerialization(stack, options); err != nil { + return err + } + if err = addInterceptBeforeSigning(stack, options); err != nil { + return err + } + if err = addInterceptAfterSigning(stack, options); err != nil { + return err + } + if err = addInterceptTransmit(stack, options); err != nil { + return err + } + if err = addInterceptBeforeDeserialization(stack, options); err != nil { + return err + } + if err = addInterceptAfterDeserialization(stack, options); err != nil { + return err + } if err = addSpanInitializeStart(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/auth.go index 1fe418e02..a8825dad3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/auth.go @@ -12,6 +12,8 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" + "slices" + "strings" ) func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { @@ -169,7 +171,8 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { + sorted := sortAuthOptions(options, m.options.AuthSchemePreference) + for _, option := range sorted { if option.SchemeID == smithyauth.SchemeIDAnonymous { return 
newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true } @@ -188,6 +191,29 @@ func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) return nil, false } +func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option { + byPriority := make([]*smithyauth.Option, 0, len(options)) + for _, prefName := range preferred { + for _, option := range options { + optName := option.SchemeID + if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 { + optName = parts[1] + } + if prefName == optName { + byPriority = append(byPriority, option) + } + } + } + for _, option := range options { + if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool { + return o.SchemeID == option.SchemeID + }) { + byPriority = append(byPriority, option) + } + } + return byPriority +} + type resolvedAuthSchemeKey struct{} type resolvedAuthScheme struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go index 537360096..40c1b579d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go @@ -23,17 +23,8 @@ import ( "io/ioutil" "strconv" "strings" - "time" ) -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - type awsEc2query_deserializeOpAcceptAddressTransfer struct { } @@ -6687,14 +6678,14 @@ func awsEc2query_deserializeOpErrorCreateDefaultVpc(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpCreateDhcpOptions struct { +type awsEc2query_deserializeOpCreateDelegateMacVolumeOwnershipTask struct { } -func (*awsEc2query_deserializeOpCreateDhcpOptions) ID() string { +func (*awsEc2query_deserializeOpCreateDelegateMacVolumeOwnershipTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateDhcpOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateDelegateMacVolumeOwnershipTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -6712,9 +6703,9 @@ func (m *awsEc2query_deserializeOpCreateDhcpOptions) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateDhcpOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateDelegateMacVolumeOwnershipTask(response, &metadata) } - output := &CreateDhcpOptionsOutput{} + output := &CreateDelegateMacVolumeOwnershipTaskOutput{} out.Result = output var buff [1024]byte @@ -6735,7 +6726,7 @@ func (m *awsEc2query_deserializeOpCreateDhcpOptions) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateDhcpOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateDelegateMacVolumeOwnershipTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -6749,7 +6740,7 @@ func (m *awsEc2query_deserializeOpCreateDhcpOptions) HandleDeserialize(ctx conte return out, metadata, err } -func 
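// --- Example (editor's sketch, not part of the vendored diff) -----------------
// The auth.go hunk above introduces sortAuthOptions, which reorders the
// resolved auth options so that schemes named in Options.AuthSchemePreference
// are tried first (matching the part after "#" in the scheme ID, e.g.
// "aws.auth#sigv4" matches "sigv4"); unlisted schemes keep their original
// relative order afterwards. A minimal sketch of setting that preference when
// constructing the client, assuming this vendored client version:
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := ec2.NewFromConfig(cfg, func(o *ec2.Options) {
		// Illustrative value: prefer SigV4; any other resolved schemes are
		// appended after it in their original order.
		o.AuthSchemePreference = []string{"sigv4"}
	})
	_ = client
}
// --- End example ---------------------------------------------------------------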
awsEc2query_deserializeOpErrorCreateDhcpOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateDelegateMacVolumeOwnershipTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -6782,204 +6773,14 @@ func awsEc2query_deserializeOpErrorCreateDhcpOptions(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpCreateEgressOnlyInternetGateway struct { -} - -func (*awsEc2query_deserializeOpCreateEgressOnlyInternetGateway) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpCreateEgressOnlyInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateEgressOnlyInternetGateway(response, &metadata) - } - output := &CreateEgressOnlyInternetGatewayOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateEgressOnlyInternetGatewayOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorCreateEgressOnlyInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := 
&smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpCreateFleet struct { -} - -func (*awsEc2query_deserializeOpCreateFleet) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpCreateFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateFleet(response, &metadata) - } - output := &CreateFleetOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateFleetOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorCreateFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpCreateFlowLogs struct { +type awsEc2query_deserializeOpCreateDhcpOptions struct { } -func (*awsEc2query_deserializeOpCreateFlowLogs) ID() string { +func (*awsEc2query_deserializeOpCreateDhcpOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateDhcpOptions) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -6997,9 +6798,9 @@ func (m *awsEc2query_deserializeOpCreateFlowLogs) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateFlowLogs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateDhcpOptions(response, &metadata) } - output := &CreateFlowLogsOutput{} + output := &CreateDhcpOptionsOutput{} out.Result = output var buff [1024]byte @@ -7020,7 +6821,7 @@ func (m *awsEc2query_deserializeOpCreateFlowLogs) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateFlowLogsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateDhcpOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7034,7 +6835,7 @@ func (m *awsEc2query_deserializeOpCreateFlowLogs) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateDhcpOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7067,14 +6868,14 @@ func awsEc2query_deserializeOpErrorCreateFlowLogs(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpCreateFpgaImage struct { +type awsEc2query_deserializeOpCreateEgressOnlyInternetGateway struct { } -func (*awsEc2query_deserializeOpCreateFpgaImage) ID() string { +func (*awsEc2query_deserializeOpCreateEgressOnlyInternetGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateFpgaImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateEgressOnlyInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7092,9 +6893,9 @@ func (m *awsEc2query_deserializeOpCreateFpgaImage) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateFpgaImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateEgressOnlyInternetGateway(response, &metadata) } - output := &CreateFpgaImageOutput{} + output := &CreateEgressOnlyInternetGatewayOutput{} out.Result = output var buff [1024]byte @@ -7115,7 +6916,7 @@ func (m *awsEc2query_deserializeOpCreateFpgaImage) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateFpgaImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateEgressOnlyInternetGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7129,7 +6930,7 @@ func (m *awsEc2query_deserializeOpCreateFpgaImage) 
HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateFpgaImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateEgressOnlyInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7162,14 +6963,14 @@ func awsEc2query_deserializeOpErrorCreateFpgaImage(response *smithyhttp.Response } } -type awsEc2query_deserializeOpCreateImage struct { +type awsEc2query_deserializeOpCreateFleet struct { } -func (*awsEc2query_deserializeOpCreateImage) ID() string { +func (*awsEc2query_deserializeOpCreateFleet) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7187,9 +6988,9 @@ func (m *awsEc2query_deserializeOpCreateImage) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateFleet(response, &metadata) } - output := &CreateImageOutput{} + output := &CreateFleetOutput{} out.Result = output var buff [1024]byte @@ -7210,7 +7011,7 @@ func (m *awsEc2query_deserializeOpCreateImage) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateFleetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7224,7 +7025,7 @@ func (m *awsEc2query_deserializeOpCreateImage) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7257,14 +7058,14 @@ func awsEc2query_deserializeOpErrorCreateImage(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpCreateInstanceConnectEndpoint struct { +type awsEc2query_deserializeOpCreateFlowLogs struct { } -func (*awsEc2query_deserializeOpCreateInstanceConnectEndpoint) ID() string { +func (*awsEc2query_deserializeOpCreateFlowLogs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateInstanceConnectEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, 
metadata, err = next.HandleDeserialize(ctx, in) @@ -7282,9 +7083,9 @@ func (m *awsEc2query_deserializeOpCreateInstanceConnectEndpoint) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceConnectEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateFlowLogs(response, &metadata) } - output := &CreateInstanceConnectEndpointOutput{} + output := &CreateFlowLogsOutput{} out.Result = output var buff [1024]byte @@ -7305,7 +7106,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceConnectEndpoint) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateInstanceConnectEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateFlowLogsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7319,7 +7120,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceConnectEndpoint) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateInstanceConnectEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7352,14 +7153,14 @@ func awsEc2query_deserializeOpErrorCreateInstanceConnectEndpoint(response *smith } } -type awsEc2query_deserializeOpCreateInstanceEventWindow struct { +type awsEc2query_deserializeOpCreateFpgaImage struct { } -func (*awsEc2query_deserializeOpCreateInstanceEventWindow) ID() string { +func (*awsEc2query_deserializeOpCreateFpgaImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateFpgaImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7377,9 +7178,9 @@ func (m *awsEc2query_deserializeOpCreateInstanceEventWindow) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceEventWindow(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateFpgaImage(response, &metadata) } - output := &CreateInstanceEventWindowOutput{} + output := &CreateFpgaImageOutput{} out.Result = output var buff [1024]byte @@ -7400,7 +7201,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceEventWindow) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateInstanceEventWindowOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateFpgaImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7414,7 +7215,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceEventWindow) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorCreateFpgaImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7447,14 +7248,14 @@ func awsEc2query_deserializeOpErrorCreateInstanceEventWindow(response *smithyhtt } } -type awsEc2query_deserializeOpCreateInstanceExportTask struct { +type awsEc2query_deserializeOpCreateImage struct { } -func (*awsEc2query_deserializeOpCreateInstanceExportTask) ID() string { +func (*awsEc2query_deserializeOpCreateImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateInstanceExportTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7472,9 +7273,9 @@ func (m *awsEc2query_deserializeOpCreateInstanceExportTask) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceExportTask(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateImage(response, &metadata) } - output := &CreateInstanceExportTaskOutput{} + output := &CreateImageOutput{} out.Result = output var buff [1024]byte @@ -7495,7 +7296,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceExportTask) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateInstanceExportTaskOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7509,7 +7310,7 @@ func (m *awsEc2query_deserializeOpCreateInstanceExportTask) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateInstanceExportTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7542,14 +7343,14 @@ func awsEc2query_deserializeOpErrorCreateInstanceExportTask(response *smithyhttp } } -type awsEc2query_deserializeOpCreateInternetGateway struct { +type awsEc2query_deserializeOpCreateImageUsageReport struct { } -func (*awsEc2query_deserializeOpCreateInternetGateway) ID() string { +func (*awsEc2query_deserializeOpCreateImageUsageReport) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateImageUsageReport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7567,9 +7368,9 @@ func (m 
*awsEc2query_deserializeOpCreateInternetGateway) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateInternetGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateImageUsageReport(response, &metadata) } - output := &CreateInternetGatewayOutput{} + output := &CreateImageUsageReportOutput{} out.Result = output var buff [1024]byte @@ -7590,7 +7391,7 @@ func (m *awsEc2query_deserializeOpCreateInternetGateway) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateInternetGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateImageUsageReportOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7604,7 +7405,7 @@ func (m *awsEc2query_deserializeOpCreateInternetGateway) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateImageUsageReport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7637,14 +7438,14 @@ func awsEc2query_deserializeOpErrorCreateInternetGateway(response *smithyhttp.Re } } -type awsEc2query_deserializeOpCreateIpam struct { +type awsEc2query_deserializeOpCreateInstanceConnectEndpoint struct { } -func (*awsEc2query_deserializeOpCreateIpam) ID() string { +func (*awsEc2query_deserializeOpCreateInstanceConnectEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateInstanceConnectEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7662,9 +7463,9 @@ func (m *awsEc2query_deserializeOpCreateIpam) HandleDeserialize(ctx context.Cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateIpam(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceConnectEndpoint(response, &metadata) } - output := &CreateIpamOutput{} + output := &CreateInstanceConnectEndpointOutput{} out.Result = output var buff [1024]byte @@ -7685,7 +7486,7 @@ func (m *awsEc2query_deserializeOpCreateIpam) HandleDeserialize(ctx context.Cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateIpamOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateInstanceConnectEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7699,7 +7500,7 @@ func (m *awsEc2query_deserializeOpCreateIpam) HandleDeserialize(ctx context.Cont return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateInstanceConnectEndpoint(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7732,14 +7533,14 @@ func awsEc2query_deserializeOpErrorCreateIpam(response *smithyhttp.Response, met } } -type awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken struct { +type awsEc2query_deserializeOpCreateInstanceEventWindow struct { } -func (*awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) ID() string { +func (*awsEc2query_deserializeOpCreateInstanceEventWindow) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7757,9 +7558,9 @@ func (m *awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateIpamExternalResourceVerificationToken(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceEventWindow(response, &metadata) } - output := &CreateIpamExternalResourceVerificationTokenOutput{} + output := &CreateInstanceEventWindowOutput{} out.Result = output var buff [1024]byte @@ -7780,7 +7581,7 @@ func (m *awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateIpamExternalResourceVerificationTokenOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateInstanceEventWindowOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7794,7 +7595,7 @@ func (m *awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) H return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateIpamExternalResourceVerificationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7827,14 +7628,14 @@ func awsEc2query_deserializeOpErrorCreateIpamExternalResourceVerificationToken(r } } -type awsEc2query_deserializeOpCreateIpamPool struct { +type awsEc2query_deserializeOpCreateInstanceExportTask struct { } -func (*awsEc2query_deserializeOpCreateIpamPool) ID() string { +func (*awsEc2query_deserializeOpCreateInstanceExportTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateInstanceExportTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err 
error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7852,9 +7653,9 @@ func (m *awsEc2query_deserializeOpCreateIpamPool) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateIpamPool(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateInstanceExportTask(response, &metadata) } - output := &CreateIpamPoolOutput{} + output := &CreateInstanceExportTaskOutput{} out.Result = output var buff [1024]byte @@ -7875,7 +7676,7 @@ func (m *awsEc2query_deserializeOpCreateIpamPool) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateIpamPoolOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateInstanceExportTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7889,7 +7690,7 @@ func (m *awsEc2query_deserializeOpCreateIpamPool) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateIpamPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateInstanceExportTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -7922,14 +7723,14 @@ func awsEc2query_deserializeOpErrorCreateIpamPool(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpCreateIpamResourceDiscovery struct { +type awsEc2query_deserializeOpCreateInternetGateway struct { } -func (*awsEc2query_deserializeOpCreateIpamResourceDiscovery) ID() string { +func (*awsEc2query_deserializeOpCreateInternetGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7947,9 +7748,9 @@ func (m *awsEc2query_deserializeOpCreateIpamResourceDiscovery) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateIpamResourceDiscovery(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateInternetGateway(response, &metadata) } - output := &CreateIpamResourceDiscoveryOutput{} + output := &CreateInternetGatewayOutput{} out.Result = output var buff [1024]byte @@ -7970,7 +7771,7 @@ func (m *awsEc2query_deserializeOpCreateIpamResourceDiscovery) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateIpamResourceDiscoveryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateInternetGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7984,7 +7785,7 @@ func (m *awsEc2query_deserializeOpCreateIpamResourceDiscovery) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateIpamResourceDiscovery(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8017,14 +7818,14 @@ func awsEc2query_deserializeOpErrorCreateIpamResourceDiscovery(response *smithyh } } -type awsEc2query_deserializeOpCreateIpamScope struct { +type awsEc2query_deserializeOpCreateIpam struct { } -func (*awsEc2query_deserializeOpCreateIpamScope) ID() string { +func (*awsEc2query_deserializeOpCreateIpam) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8042,9 +7843,9 @@ func (m *awsEc2query_deserializeOpCreateIpamScope) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateIpamScope(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateIpam(response, &metadata) } - output := &CreateIpamScopeOutput{} + output := &CreateIpamOutput{} out.Result = output var buff [1024]byte @@ -8065,7 +7866,7 @@ func (m *awsEc2query_deserializeOpCreateIpamScope) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateIpamScopeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateIpamOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8079,7 +7880,7 @@ func (m *awsEc2query_deserializeOpCreateIpamScope) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8112,14 +7913,14 @@ func awsEc2query_deserializeOpErrorCreateIpamScope(response *smithyhttp.Response } } -type awsEc2query_deserializeOpCreateKeyPair struct { +type awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken struct { } -func (*awsEc2query_deserializeOpCreateKeyPair) ID() string { +func (*awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateIpamExternalResourceVerificationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8137,9 +7938,9 @@ func (m 
*awsEc2query_deserializeOpCreateKeyPair) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateKeyPair(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateIpamExternalResourceVerificationToken(response, &metadata) } - output := &CreateKeyPairOutput{} + output := &CreateIpamExternalResourceVerificationTokenOutput{} out.Result = output var buff [1024]byte @@ -8160,7 +7961,7 @@ func (m *awsEc2query_deserializeOpCreateKeyPair) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateKeyPairOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateIpamExternalResourceVerificationTokenOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8174,7 +7975,7 @@ func (m *awsEc2query_deserializeOpCreateKeyPair) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateIpamExternalResourceVerificationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8207,14 +8008,14 @@ func awsEc2query_deserializeOpErrorCreateKeyPair(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpCreateLaunchTemplate struct { +type awsEc2query_deserializeOpCreateIpamPool struct { } -func (*awsEc2query_deserializeOpCreateLaunchTemplate) ID() string { +func (*awsEc2query_deserializeOpCreateIpamPool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8232,9 +8033,9 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplate) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLaunchTemplate(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateIpamPool(response, &metadata) } - output := &CreateLaunchTemplateOutput{} + output := &CreateIpamPoolOutput{} out.Result = output var buff [1024]byte @@ -8255,7 +8056,7 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplate) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLaunchTemplateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateIpamPoolOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8269,7 +8070,7 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplate) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateIpamPool(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8302,14 +8103,14 @@ func awsEc2query_deserializeOpErrorCreateLaunchTemplate(response *smithyhttp.Res } } -type awsEc2query_deserializeOpCreateLaunchTemplateVersion struct { +type awsEc2query_deserializeOpCreateIpamResourceDiscovery struct { } -func (*awsEc2query_deserializeOpCreateLaunchTemplateVersion) ID() string { +func (*awsEc2query_deserializeOpCreateIpamResourceDiscovery) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLaunchTemplateVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8327,9 +8128,9 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplateVersion) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLaunchTemplateVersion(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateIpamResourceDiscovery(response, &metadata) } - output := &CreateLaunchTemplateVersionOutput{} + output := &CreateIpamResourceDiscoveryOutput{} out.Result = output var buff [1024]byte @@ -8350,7 +8151,7 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplateVersion) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLaunchTemplateVersionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateIpamResourceDiscoveryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8364,7 +8165,7 @@ func (m *awsEc2query_deserializeOpCreateLaunchTemplateVersion) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLaunchTemplateVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8397,14 +8198,14 @@ func awsEc2query_deserializeOpErrorCreateLaunchTemplateVersion(response *smithyh } } -type awsEc2query_deserializeOpCreateLocalGatewayRoute struct { +type awsEc2query_deserializeOpCreateIpamScope struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpCreateIpamScope) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ 
-8422,9 +8223,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRoute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateIpamScope(response, &metadata) } - output := &CreateLocalGatewayRouteOutput{} + output := &CreateIpamScopeOutput{} out.Result = output var buff [1024]byte @@ -8445,7 +8246,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRoute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateIpamScopeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8459,7 +8260,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRoute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8492,14 +8293,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayRoute(response *smithyhttp. } } -type awsEc2query_deserializeOpCreateLocalGatewayRouteTable struct { +type awsEc2query_deserializeOpCreateKeyPair struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTable) ID() string { +func (*awsEc2query_deserializeOpCreateKeyPair) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8517,9 +8318,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTable) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateKeyPair(response, &metadata) } - output := &CreateLocalGatewayRouteTableOutput{} + output := &CreateKeyPairOutput{} out.Result = output var buff [1024]byte @@ -8540,7 +8341,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTable) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateKeyPairOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8554,7 +8355,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTable) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateKeyPair(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8587,14 +8388,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTable(response *smithy } } -type awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { +type awsEc2query_deserializeOpCreateLaunchTemplate struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation) ID() string { +func (*awsEc2query_deserializeOpCreateLaunchTemplate) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8612,9 +8413,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGr } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLaunchTemplate(response, &metadata) } - output := &CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput{} + output := &CreateLaunchTemplateOutput{} out.Result = output var buff [1024]byte @@ -8635,7 +8436,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGr } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLaunchTemplateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8649,7 +8450,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGr return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8682,14 +8483,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVirtualInterfaceG } } -type awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation struct { +type awsEc2query_deserializeOpCreateLaunchTemplateVersion struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) ID() string { +func (*awsEc2query_deserializeOpCreateLaunchTemplateVersion) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpCreateLaunchTemplateVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8707,9 +8508,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVpcAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLaunchTemplateVersion(response, &metadata) } - output := &CreateLocalGatewayRouteTableVpcAssociationOutput{} + output := &CreateLaunchTemplateVersionOutput{} out.Result = output var buff [1024]byte @@ -8730,7 +8531,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableVpcAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLaunchTemplateVersionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8744,7 +8545,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVpcAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLaunchTemplateVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8777,14 +8578,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVpcAssociation(re } } -type awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface struct { +type awsEc2query_deserializeOpCreateLocalGatewayRoute struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8802,9 +8603,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRoute(response, &metadata) } - output := &CreateLocalGatewayVirtualInterfaceOutput{} + output := &CreateLocalGatewayRouteOutput{} out.Result = output var buff [1024]byte @@ -8825,7 +8626,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentCreateLocalGatewayVirtualInterfaceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8839,7 +8640,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8872,14 +8673,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterface(response * } } -type awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup struct { +type awsEc2query_deserializeOpCreateLocalGatewayRouteTable struct { } -func (*awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8897,9 +8698,9 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterfaceGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTable(response, &metadata) } - output := &CreateLocalGatewayVirtualInterfaceGroupOutput{} + output := &CreateLocalGatewayRouteTableOutput{} out.Result = output var buff [1024]byte @@ -8920,7 +8721,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateLocalGatewayVirtualInterfaceGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8934,7 +8735,7 @@ func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterfaceGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8967,14 +8768,14 @@ func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterfaceGroup(respo } } -type awsEc2query_deserializeOpCreateManagedPrefixList struct { +type 
awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { } -func (*awsEc2query_deserializeOpCreateManagedPrefixList) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateManagedPrefixList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8992,9 +8793,9 @@ func (m *awsEc2query_deserializeOpCreateManagedPrefixList) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateManagedPrefixList(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response, &metadata) } - output := &CreateManagedPrefixListOutput{} + output := &CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput{} out.Result = output var buff [1024]byte @@ -9015,7 +8816,7 @@ func (m *awsEc2query_deserializeOpCreateManagedPrefixList) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateManagedPrefixListOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9029,7 +8830,7 @@ func (m *awsEc2query_deserializeOpCreateManagedPrefixList) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9062,14 +8863,14 @@ func awsEc2query_deserializeOpErrorCreateManagedPrefixList(response *smithyhttp. 
} } -type awsEc2query_deserializeOpCreateNatGateway struct { +type awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation struct { } -func (*awsEc2query_deserializeOpCreateNatGateway) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNatGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayRouteTableVpcAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9087,9 +8888,9 @@ func (m *awsEc2query_deserializeOpCreateNatGateway) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNatGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVpcAssociation(response, &metadata) } - output := &CreateNatGatewayOutput{} + output := &CreateLocalGatewayRouteTableVpcAssociationOutput{} out.Result = output var buff [1024]byte @@ -9110,7 +8911,7 @@ func (m *awsEc2query_deserializeOpCreateNatGateway) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNatGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayRouteTableVpcAssociationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9124,7 +8925,7 @@ func (m *awsEc2query_deserializeOpCreateNatGateway) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNatGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayRouteTableVpcAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9157,14 +8958,14 @@ func awsEc2query_deserializeOpErrorCreateNatGateway(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpCreateNetworkAcl struct { +type awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface struct { } -func (*awsEc2query_deserializeOpCreateNetworkAcl) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9182,9 +8983,9 @@ func (m *awsEc2query_deserializeOpCreateNetworkAcl) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkAcl(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterface(response, &metadata) } - output := &CreateNetworkAclOutput{} + output := &CreateLocalGatewayVirtualInterfaceOutput{} out.Result = output var buff [1024]byte @@ -9205,7 +9006,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkAcl) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNetworkAclOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayVirtualInterfaceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9219,7 +9020,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkAcl) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9252,14 +9053,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkAcl(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpCreateNetworkAclEntry struct { +type awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup struct { } -func (*awsEc2query_deserializeOpCreateNetworkAclEntry) ID() string { +func (*awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateLocalGatewayVirtualInterfaceGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9277,21 +9078,44 @@ func (m *awsEc2query_deserializeOpCreateNetworkAclEntry) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkAclEntry(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterfaceGroup(response, &metadata) } - output := &CreateNetworkAclEntryOutput{} + output := &CreateLocalGatewayVirtualInterfaceGroupOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentCreateLocalGatewayVirtualInterfaceGroupOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateLocalGatewayVirtualInterfaceGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9324,14 +9148,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkAclEntry(response *smithyhttp.Re } } -type awsEc2query_deserializeOpCreateNetworkInsightsAccessScope struct { +type awsEc2query_deserializeOpCreateMacSystemIntegrityProtectionModificationTask struct { } -func (*awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) ID() string { +func (*awsEc2query_deserializeOpCreateMacSystemIntegrityProtectionModificationTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateMacSystemIntegrityProtectionModificationTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9349,9 +9173,9 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInsightsAccessScope(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateMacSystemIntegrityProtectionModificationTask(response, &metadata) } - output := &CreateNetworkInsightsAccessScopeOutput{} + output := &CreateMacSystemIntegrityProtectionModificationTaskOutput{} out.Result = output var buff [1024]byte @@ -9372,7 +9196,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNetworkInsightsAccessScopeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateMacSystemIntegrityProtectionModificationTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9386,7 +9210,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkInsightsAccessScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateMacSystemIntegrityProtectionModificationTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9419,14 +9243,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkInsightsAccessScope(response *sm } } -type awsEc2query_deserializeOpCreateNetworkInsightsPath struct { +type awsEc2query_deserializeOpCreateManagedPrefixList struct { } -func 
(*awsEc2query_deserializeOpCreateNetworkInsightsPath) ID() string { +func (*awsEc2query_deserializeOpCreateManagedPrefixList) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkInsightsPath) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateManagedPrefixList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9444,9 +9268,9 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsPath) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInsightsPath(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateManagedPrefixList(response, &metadata) } - output := &CreateNetworkInsightsPathOutput{} + output := &CreateManagedPrefixListOutput{} out.Result = output var buff [1024]byte @@ -9467,7 +9291,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsPath) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNetworkInsightsPathOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateManagedPrefixListOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9481,7 +9305,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInsightsPath) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkInsightsPath(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9514,14 +9338,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkInsightsPath(response *smithyhtt } } -type awsEc2query_deserializeOpCreateNetworkInterface struct { +type awsEc2query_deserializeOpCreateNatGateway struct { } -func (*awsEc2query_deserializeOpCreateNetworkInterface) ID() string { +func (*awsEc2query_deserializeOpCreateNatGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNatGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9539,9 +9363,9 @@ func (m *awsEc2query_deserializeOpCreateNetworkInterface) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNatGateway(response, &metadata) } - output := &CreateNetworkInterfaceOutput{} + output := &CreateNatGatewayOutput{} out.Result = output var buff [1024]byte @@ -9562,7 +9386,7 @@ func (m 
*awsEc2query_deserializeOpCreateNetworkInterface) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNetworkInterfaceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNatGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9576,7 +9400,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInterface) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNatGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9609,14 +9433,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkInterface(response *smithyhttp.R } } -type awsEc2query_deserializeOpCreateNetworkInterfacePermission struct { +type awsEc2query_deserializeOpCreateNetworkAcl struct { } -func (*awsEc2query_deserializeOpCreateNetworkInterfacePermission) ID() string { +func (*awsEc2query_deserializeOpCreateNetworkAcl) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateNetworkInterfacePermission) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNetworkAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9634,9 +9458,9 @@ func (m *awsEc2query_deserializeOpCreateNetworkInterfacePermission) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInterfacePermission(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkAcl(response, &metadata) } - output := &CreateNetworkInterfacePermissionOutput{} + output := &CreateNetworkAclOutput{} out.Result = output var buff [1024]byte @@ -9657,7 +9481,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInterfacePermission) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateNetworkInterfacePermissionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNetworkAclOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9671,7 +9495,7 @@ func (m *awsEc2query_deserializeOpCreateNetworkInterfacePermission) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateNetworkInterfacePermission(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNetworkAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9704,14 +9528,14 @@ func awsEc2query_deserializeOpErrorCreateNetworkInterfacePermission(response *sm } } -type awsEc2query_deserializeOpCreatePlacementGroup struct { +type 
awsEc2query_deserializeOpCreateNetworkAclEntry struct { } -func (*awsEc2query_deserializeOpCreatePlacementGroup) ID() string { +func (*awsEc2query_deserializeOpCreateNetworkAclEntry) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreatePlacementGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9729,9 +9553,81 @@ func (m *awsEc2query_deserializeOpCreatePlacementGroup) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreatePlacementGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkAclEntry(response, &metadata) } - output := &CreatePlacementGroupOutput{} + output := &CreateNetworkAclEntryOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorCreateNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpCreateNetworkInsightsAccessScope struct { +} + +func (*awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpCreateNetworkInsightsAccessScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInsightsAccessScope(response, &metadata) + } + output := &CreateNetworkInsightsAccessScopeOutput{} out.Result = output var buff 
[1024]byte @@ -9752,7 +9648,7 @@ func (m *awsEc2query_deserializeOpCreatePlacementGroup) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreatePlacementGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNetworkInsightsAccessScopeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9766,7 +9662,7 @@ func (m *awsEc2query_deserializeOpCreatePlacementGroup) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreatePlacementGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNetworkInsightsAccessScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9799,14 +9695,14 @@ func awsEc2query_deserializeOpErrorCreatePlacementGroup(response *smithyhttp.Res } } -type awsEc2query_deserializeOpCreatePublicIpv4Pool struct { +type awsEc2query_deserializeOpCreateNetworkInsightsPath struct { } -func (*awsEc2query_deserializeOpCreatePublicIpv4Pool) ID() string { +func (*awsEc2query_deserializeOpCreateNetworkInsightsPath) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreatePublicIpv4Pool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNetworkInsightsPath) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9824,9 +9720,9 @@ func (m *awsEc2query_deserializeOpCreatePublicIpv4Pool) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreatePublicIpv4Pool(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInsightsPath(response, &metadata) } - output := &CreatePublicIpv4PoolOutput{} + output := &CreateNetworkInsightsPathOutput{} out.Result = output var buff [1024]byte @@ -9847,7 +9743,7 @@ func (m *awsEc2query_deserializeOpCreatePublicIpv4Pool) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreatePublicIpv4PoolOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNetworkInsightsPathOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9861,7 +9757,7 @@ func (m *awsEc2query_deserializeOpCreatePublicIpv4Pool) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreatePublicIpv4Pool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNetworkInsightsPath(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9894,14 +9790,14 @@ func awsEc2query_deserializeOpErrorCreatePublicIpv4Pool(response *smithyhttp.Res } } -type awsEc2query_deserializeOpCreateReplaceRootVolumeTask 
struct { +type awsEc2query_deserializeOpCreateNetworkInterface struct { } -func (*awsEc2query_deserializeOpCreateReplaceRootVolumeTask) ID() string { +func (*awsEc2query_deserializeOpCreateNetworkInterface) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateReplaceRootVolumeTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNetworkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9919,9 +9815,9 @@ func (m *awsEc2query_deserializeOpCreateReplaceRootVolumeTask) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateReplaceRootVolumeTask(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInterface(response, &metadata) } - output := &CreateReplaceRootVolumeTaskOutput{} + output := &CreateNetworkInterfaceOutput{} out.Result = output var buff [1024]byte @@ -9942,7 +9838,7 @@ func (m *awsEc2query_deserializeOpCreateReplaceRootVolumeTask) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateReplaceRootVolumeTaskOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNetworkInterfaceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9956,7 +9852,7 @@ func (m *awsEc2query_deserializeOpCreateReplaceRootVolumeTask) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateReplaceRootVolumeTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9989,14 +9885,14 @@ func awsEc2query_deserializeOpErrorCreateReplaceRootVolumeTask(response *smithyh } } -type awsEc2query_deserializeOpCreateReservedInstancesListing struct { +type awsEc2query_deserializeOpCreateNetworkInterfacePermission struct { } -func (*awsEc2query_deserializeOpCreateReservedInstancesListing) ID() string { +func (*awsEc2query_deserializeOpCreateNetworkInterfacePermission) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateReservedInstancesListing) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateNetworkInterfacePermission) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10014,9 +9910,9 @@ func (m *awsEc2query_deserializeOpCreateReservedInstancesListing) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateReservedInstancesListing(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateNetworkInterfacePermission(response, &metadata) } - output 
:= &CreateReservedInstancesListingOutput{} + output := &CreateNetworkInterfacePermissionOutput{} out.Result = output var buff [1024]byte @@ -10037,7 +9933,7 @@ func (m *awsEc2query_deserializeOpCreateReservedInstancesListing) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateReservedInstancesListingOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateNetworkInterfacePermissionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10051,7 +9947,7 @@ func (m *awsEc2query_deserializeOpCreateReservedInstancesListing) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateReservedInstancesListing(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateNetworkInterfacePermission(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10084,14 +9980,14 @@ func awsEc2query_deserializeOpErrorCreateReservedInstancesListing(response *smit } } -type awsEc2query_deserializeOpCreateRestoreImageTask struct { +type awsEc2query_deserializeOpCreatePlacementGroup struct { } -func (*awsEc2query_deserializeOpCreateRestoreImageTask) ID() string { +func (*awsEc2query_deserializeOpCreatePlacementGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRestoreImageTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreatePlacementGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10109,9 +10005,9 @@ func (m *awsEc2query_deserializeOpCreateRestoreImageTask) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRestoreImageTask(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreatePlacementGroup(response, &metadata) } - output := &CreateRestoreImageTaskOutput{} + output := &CreatePlacementGroupOutput{} out.Result = output var buff [1024]byte @@ -10132,7 +10028,7 @@ func (m *awsEc2query_deserializeOpCreateRestoreImageTask) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRestoreImageTaskOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreatePlacementGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10146,7 +10042,7 @@ func (m *awsEc2query_deserializeOpCreateRestoreImageTask) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRestoreImageTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreatePlacementGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10179,14 +10075,14 @@ func 
awsEc2query_deserializeOpErrorCreateRestoreImageTask(response *smithyhttp.R } } -type awsEc2query_deserializeOpCreateRoute struct { +type awsEc2query_deserializeOpCreatePublicIpv4Pool struct { } -func (*awsEc2query_deserializeOpCreateRoute) ID() string { +func (*awsEc2query_deserializeOpCreatePublicIpv4Pool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreatePublicIpv4Pool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10204,9 +10100,9 @@ func (m *awsEc2query_deserializeOpCreateRoute) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreatePublicIpv4Pool(response, &metadata) } - output := &CreateRouteOutput{} + output := &CreatePublicIpv4PoolOutput{} out.Result = output var buff [1024]byte @@ -10227,7 +10123,7 @@ func (m *awsEc2query_deserializeOpCreateRoute) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreatePublicIpv4PoolOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10241,7 +10137,7 @@ func (m *awsEc2query_deserializeOpCreateRoute) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreatePublicIpv4Pool(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10274,14 +10170,14 @@ func awsEc2query_deserializeOpErrorCreateRoute(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpCreateRouteServer struct { +type awsEc2query_deserializeOpCreateReplaceRootVolumeTask struct { } -func (*awsEc2query_deserializeOpCreateRouteServer) ID() string { +func (*awsEc2query_deserializeOpCreateReplaceRootVolumeTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateReplaceRootVolumeTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10299,9 +10195,9 @@ func (m *awsEc2query_deserializeOpCreateRouteServer) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateReplaceRootVolumeTask(response, &metadata) } - output := &CreateRouteServerOutput{} + output := 
&CreateReplaceRootVolumeTaskOutput{} out.Result = output var buff [1024]byte @@ -10322,7 +10218,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServer) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRouteServerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateReplaceRootVolumeTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10336,7 +10232,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServer) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateReplaceRootVolumeTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10369,14 +10265,14 @@ func awsEc2query_deserializeOpErrorCreateRouteServer(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpCreateRouteServerEndpoint struct { +type awsEc2query_deserializeOpCreateReservedInstancesListing struct { } -func (*awsEc2query_deserializeOpCreateRouteServerEndpoint) ID() string { +func (*awsEc2query_deserializeOpCreateReservedInstancesListing) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRouteServerEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateReservedInstancesListing) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10394,9 +10290,9 @@ func (m *awsEc2query_deserializeOpCreateRouteServerEndpoint) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServerEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateReservedInstancesListing(response, &metadata) } - output := &CreateRouteServerEndpointOutput{} + output := &CreateReservedInstancesListingOutput{} out.Result = output var buff [1024]byte @@ -10417,7 +10313,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServerEndpoint) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRouteServerEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateReservedInstancesListingOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10431,7 +10327,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServerEndpoint) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRouteServerEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateReservedInstancesListing(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10464,14 +10360,14 @@ func 
awsEc2query_deserializeOpErrorCreateRouteServerEndpoint(response *smithyhtt } } -type awsEc2query_deserializeOpCreateRouteServerPeer struct { +type awsEc2query_deserializeOpCreateRestoreImageTask struct { } -func (*awsEc2query_deserializeOpCreateRouteServerPeer) ID() string { +func (*awsEc2query_deserializeOpCreateRestoreImageTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRouteServerPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRestoreImageTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10489,9 +10385,9 @@ func (m *awsEc2query_deserializeOpCreateRouteServerPeer) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServerPeer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRestoreImageTask(response, &metadata) } - output := &CreateRouteServerPeerOutput{} + output := &CreateRestoreImageTaskOutput{} out.Result = output var buff [1024]byte @@ -10512,7 +10408,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServerPeer) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRouteServerPeerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRestoreImageTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10526,7 +10422,7 @@ func (m *awsEc2query_deserializeOpCreateRouteServerPeer) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRouteServerPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRestoreImageTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10559,14 +10455,14 @@ func awsEc2query_deserializeOpErrorCreateRouteServerPeer(response *smithyhttp.Re } } -type awsEc2query_deserializeOpCreateRouteTable struct { +type awsEc2query_deserializeOpCreateRoute struct { } -func (*awsEc2query_deserializeOpCreateRouteTable) ID() string { +func (*awsEc2query_deserializeOpCreateRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10584,9 +10480,9 @@ func (m *awsEc2query_deserializeOpCreateRouteTable) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRoute(response, &metadata) } - output := &CreateRouteTableOutput{} + 
output := &CreateRouteOutput{} out.Result = output var buff [1024]byte @@ -10607,7 +10503,7 @@ func (m *awsEc2query_deserializeOpCreateRouteTable) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10621,7 +10517,7 @@ func (m *awsEc2query_deserializeOpCreateRouteTable) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10654,14 +10550,14 @@ func awsEc2query_deserializeOpErrorCreateRouteTable(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpCreateSecurityGroup struct { +type awsEc2query_deserializeOpCreateRouteServer struct { } -func (*awsEc2query_deserializeOpCreateSecurityGroup) ID() string { +func (*awsEc2query_deserializeOpCreateRouteServer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSecurityGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10679,9 +10575,9 @@ func (m *awsEc2query_deserializeOpCreateSecurityGroup) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSecurityGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServer(response, &metadata) } - output := &CreateSecurityGroupOutput{} + output := &CreateRouteServerOutput{} out.Result = output var buff [1024]byte @@ -10702,7 +10598,7 @@ func (m *awsEc2query_deserializeOpCreateSecurityGroup) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateSecurityGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRouteServerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10716,7 +10612,7 @@ func (m *awsEc2query_deserializeOpCreateSecurityGroup) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSecurityGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10749,14 +10645,14 @@ func awsEc2query_deserializeOpErrorCreateSecurityGroup(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpCreateSnapshot struct { +type 
awsEc2query_deserializeOpCreateRouteServerEndpoint struct { } -func (*awsEc2query_deserializeOpCreateSnapshot) ID() string { +func (*awsEc2query_deserializeOpCreateRouteServerEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRouteServerEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10774,9 +10670,9 @@ func (m *awsEc2query_deserializeOpCreateSnapshot) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSnapshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServerEndpoint(response, &metadata) } - output := &CreateSnapshotOutput{} + output := &CreateRouteServerEndpointOutput{} out.Result = output var buff [1024]byte @@ -10797,7 +10693,7 @@ func (m *awsEc2query_deserializeOpCreateSnapshot) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateSnapshotOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRouteServerEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10811,7 +10707,7 @@ func (m *awsEc2query_deserializeOpCreateSnapshot) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRouteServerEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10844,14 +10740,14 @@ func awsEc2query_deserializeOpErrorCreateSnapshot(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpCreateSnapshots struct { +type awsEc2query_deserializeOpCreateRouteServerPeer struct { } -func (*awsEc2query_deserializeOpCreateSnapshots) ID() string { +func (*awsEc2query_deserializeOpCreateRouteServerPeer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRouteServerPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10869,9 +10765,9 @@ func (m *awsEc2query_deserializeOpCreateSnapshots) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSnapshots(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRouteServerPeer(response, &metadata) } - output := &CreateSnapshotsOutput{} + output := &CreateRouteServerPeerOutput{} out.Result = output var buff [1024]byte @@ -10892,7 +10788,7 @@ func (m 
*awsEc2query_deserializeOpCreateSnapshots) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateSnapshotsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRouteServerPeerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10906,7 +10802,7 @@ func (m *awsEc2query_deserializeOpCreateSnapshots) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRouteServerPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10939,14 +10835,14 @@ func awsEc2query_deserializeOpErrorCreateSnapshots(response *smithyhttp.Response } } -type awsEc2query_deserializeOpCreateSpotDatafeedSubscription struct { +type awsEc2query_deserializeOpCreateRouteTable struct { } -func (*awsEc2query_deserializeOpCreateSpotDatafeedSubscription) ID() string { +func (*awsEc2query_deserializeOpCreateRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10964,9 +10860,9 @@ func (m *awsEc2query_deserializeOpCreateSpotDatafeedSubscription) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSpotDatafeedSubscription(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateRouteTable(response, &metadata) } - output := &CreateSpotDatafeedSubscriptionOutput{} + output := &CreateRouteTableOutput{} out.Result = output var buff [1024]byte @@ -10987,7 +10883,7 @@ func (m *awsEc2query_deserializeOpCreateSpotDatafeedSubscription) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateSpotDatafeedSubscriptionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateRouteTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11001,7 +10897,7 @@ func (m *awsEc2query_deserializeOpCreateSpotDatafeedSubscription) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11034,14 +10930,14 @@ func awsEc2query_deserializeOpErrorCreateSpotDatafeedSubscription(response *smit } } -type awsEc2query_deserializeOpCreateStoreImageTask struct { +type awsEc2query_deserializeOpCreateSecurityGroup 
struct { } -func (*awsEc2query_deserializeOpCreateStoreImageTask) ID() string { +func (*awsEc2query_deserializeOpCreateSecurityGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateStoreImageTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSecurityGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11059,9 +10955,9 @@ func (m *awsEc2query_deserializeOpCreateStoreImageTask) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateStoreImageTask(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSecurityGroup(response, &metadata) } - output := &CreateStoreImageTaskOutput{} + output := &CreateSecurityGroupOutput{} out.Result = output var buff [1024]byte @@ -11082,7 +10978,7 @@ func (m *awsEc2query_deserializeOpCreateStoreImageTask) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateStoreImageTaskOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateSecurityGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11096,7 +10992,7 @@ func (m *awsEc2query_deserializeOpCreateStoreImageTask) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateStoreImageTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSecurityGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11129,14 +11025,14 @@ func awsEc2query_deserializeOpErrorCreateStoreImageTask(response *smithyhttp.Res } } -type awsEc2query_deserializeOpCreateSubnet struct { +type awsEc2query_deserializeOpCreateSnapshot struct { } -func (*awsEc2query_deserializeOpCreateSubnet) ID() string { +func (*awsEc2query_deserializeOpCreateSnapshot) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSubnet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11154,9 +11050,9 @@ func (m *awsEc2query_deserializeOpCreateSubnet) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSubnet(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSnapshot(response, &metadata) } - output := &CreateSubnetOutput{} + output := &CreateSnapshotOutput{} out.Result = output var buff [1024]byte @@ -11177,7 +11073,7 @@ func (m *awsEc2query_deserializeOpCreateSubnet) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, 
t) - err = awsEc2query_deserializeOpDocumentCreateSubnetOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateSnapshotOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11191,7 +11087,7 @@ func (m *awsEc2query_deserializeOpCreateSubnet) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSubnet(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11224,14 +11120,14 @@ func awsEc2query_deserializeOpErrorCreateSubnet(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpCreateSubnetCidrReservation struct { +type awsEc2query_deserializeOpCreateSnapshots struct { } -func (*awsEc2query_deserializeOpCreateSubnetCidrReservation) ID() string { +func (*awsEc2query_deserializeOpCreateSnapshots) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateSubnetCidrReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11249,9 +11145,9 @@ func (m *awsEc2query_deserializeOpCreateSubnetCidrReservation) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateSubnetCidrReservation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSnapshots(response, &metadata) } - output := &CreateSubnetCidrReservationOutput{} + output := &CreateSnapshotsOutput{} out.Result = output var buff [1024]byte @@ -11272,7 +11168,7 @@ func (m *awsEc2query_deserializeOpCreateSubnetCidrReservation) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateSubnetCidrReservationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateSnapshotsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11286,79 +11182,7 @@ func (m *awsEc2query_deserializeOpCreateSubnetCidrReservation) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateSubnetCidrReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - 
default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpCreateTags struct { -} - -func (*awsEc2query_deserializeOpCreateTags) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpCreateTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTags(response, &metadata) - } - output := &CreateTagsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorCreateTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11391,14 +11215,14 @@ func awsEc2query_deserializeOpErrorCreateTags(response *smithyhttp.Response, met } } -type awsEc2query_deserializeOpCreateTrafficMirrorFilter struct { +type awsEc2query_deserializeOpCreateSpotDatafeedSubscription struct { } -func (*awsEc2query_deserializeOpCreateTrafficMirrorFilter) ID() string { +func (*awsEc2query_deserializeOpCreateSpotDatafeedSubscription) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilter) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11416,9 +11240,9 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilter) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorFilter(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSpotDatafeedSubscription(response, &metadata) } - output := &CreateTrafficMirrorFilterOutput{} + output := &CreateSpotDatafeedSubscriptionOutput{} out.Result = output var buff [1024]byte @@ -11439,7 +11263,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilter) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorFilterOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentCreateSpotDatafeedSubscriptionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11453,7 +11277,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilter) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilter(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11486,14 +11310,14 @@ func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilter(response *smithyhtt } } -type awsEc2query_deserializeOpCreateTrafficMirrorFilterRule struct { +type awsEc2query_deserializeOpCreateStoreImageTask struct { } -func (*awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) ID() string { +func (*awsEc2query_deserializeOpCreateStoreImageTask) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateStoreImageTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11511,9 +11335,9 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorFilterRule(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateStoreImageTask(response, &metadata) } - output := &CreateTrafficMirrorFilterRuleOutput{} + output := &CreateStoreImageTaskOutput{} out.Result = output var buff [1024]byte @@ -11534,7 +11358,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorFilterRuleOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateStoreImageTaskOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11548,7 +11372,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilterRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateStoreImageTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11581,14 +11405,14 @@ func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilterRule(response *smith } } -type awsEc2query_deserializeOpCreateTrafficMirrorSession struct { +type awsEc2query_deserializeOpCreateSubnet struct { } -func (*awsEc2query_deserializeOpCreateTrafficMirrorSession) ID() string { +func (*awsEc2query_deserializeOpCreateSubnet) ID() string { return 
"OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSubnet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11606,9 +11430,9 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorSession) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorSession(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSubnet(response, &metadata) } - output := &CreateTrafficMirrorSessionOutput{} + output := &CreateSubnetOutput{} out.Result = output var buff [1024]byte @@ -11629,7 +11453,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorSession) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorSessionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateSubnetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11643,7 +11467,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorSession) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSubnet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11676,14 +11500,14 @@ func awsEc2query_deserializeOpErrorCreateTrafficMirrorSession(response *smithyht } } -type awsEc2query_deserializeOpCreateTrafficMirrorTarget struct { +type awsEc2query_deserializeOpCreateSubnetCidrReservation struct { } -func (*awsEc2query_deserializeOpCreateTrafficMirrorTarget) ID() string { +func (*awsEc2query_deserializeOpCreateSubnetCidrReservation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTrafficMirrorTarget) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateSubnetCidrReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11701,9 +11525,9 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorTarget) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorTarget(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateSubnetCidrReservation(response, &metadata) } - output := &CreateTrafficMirrorTargetOutput{} + output := &CreateSubnetCidrReservationOutput{} out.Result = output var buff [1024]byte @@ -11724,7 +11548,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorTarget) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentCreateTrafficMirrorTargetOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateSubnetCidrReservationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11738,7 +11562,7 @@ func (m *awsEc2query_deserializeOpCreateTrafficMirrorTarget) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTrafficMirrorTarget(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateSubnetCidrReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11771,14 +11595,14 @@ func awsEc2query_deserializeOpErrorCreateTrafficMirrorTarget(response *smithyhtt } } -type awsEc2query_deserializeOpCreateTransitGateway struct { +type awsEc2query_deserializeOpCreateTags struct { } -func (*awsEc2query_deserializeOpCreateTransitGateway) ID() string { +func (*awsEc2query_deserializeOpCreateTags) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11796,44 +11620,21 @@ func (m *awsEc2query_deserializeOpCreateTransitGateway) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTags(response, &metadata) } - output := &CreateTransitGatewayOutput{} + output := &CreateTagsOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } } - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -11866,14 +11667,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGateway(response *smithyhttp.Res } } -type awsEc2query_deserializeOpCreateTransitGatewayConnect struct { +type awsEc2query_deserializeOpCreateTrafficMirrorFilter struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayConnect) ID() string { +func (*awsEc2query_deserializeOpCreateTrafficMirrorFilter) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayConnect) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilter) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11891,9 +11692,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnect) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayConnect(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorFilter(response, &metadata) } - output := &CreateTransitGatewayConnectOutput{} + output := &CreateTrafficMirrorFilterOutput{} out.Result = output var buff [1024]byte @@ -11914,7 +11715,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnect) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayConnectOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorFilterOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11928,7 +11729,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnect) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayConnect(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilter(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11961,14 +11762,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayConnect(response *smithyh } } -type awsEc2query_deserializeOpCreateTransitGatewayConnectPeer struct { +type awsEc2query_deserializeOpCreateTrafficMirrorFilterRule struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) ID() string { +func (*awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11986,9 +11787,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) HandleDeseria } if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayConnectPeer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorFilterRule(response, &metadata) } - output := &CreateTransitGatewayConnectPeerOutput{} + output := &CreateTrafficMirrorFilterRuleOutput{} out.Result = output var buff [1024]byte @@ -12009,7 +11810,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayConnectPeerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorFilterRuleOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12023,7 +11824,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayConnectPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTrafficMirrorFilterRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12056,14 +11857,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayConnectPeer(response *smi } } -type awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain struct { +type awsEc2query_deserializeOpCreateTrafficMirrorSession struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) ID() string { +func (*awsEc2query_deserializeOpCreateTrafficMirrorSession) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12081,9 +11882,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayMulticastDomain(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorSession(response, &metadata) } - output := &CreateTransitGatewayMulticastDomainOutput{} + output := &CreateTrafficMirrorSessionOutput{} out.Result = output var buff [1024]byte @@ -12104,7 +11905,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayMulticastDomainOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorSessionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12118,7 +11919,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12151,14 +11952,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayMulticastDomain(response } } -type awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment struct { +type awsEc2query_deserializeOpCreateTrafficMirrorTarget struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) ID() string { +func (*awsEc2query_deserializeOpCreateTrafficMirrorTarget) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTrafficMirrorTarget) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12176,9 +11977,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPeeringAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTrafficMirrorTarget(response, &metadata) } - output := &CreateTransitGatewayPeeringAttachmentOutput{} + output := &CreateTrafficMirrorTargetOutput{} out.Result = output var buff [1024]byte @@ -12199,7 +12000,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPeeringAttachmentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTrafficMirrorTargetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12213,7 +12014,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTrafficMirrorTarget(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12246,14 +12047,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayPeeringAttachment(respons } } -type awsEc2query_deserializeOpCreateTransitGatewayPolicyTable struct { +type awsEc2query_deserializeOpCreateTransitGateway struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGateway) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12271,9 +12072,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPolicyTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGateway(response, &metadata) } - output := &CreateTransitGatewayPolicyTableOutput{} + output := &CreateTransitGatewayOutput{} out.Result = output var buff [1024]byte @@ -12294,7 +12095,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPolicyTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12308,7 +12109,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12341,14 +12142,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayPolicyTable(response *smi } } -type awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference struct { +type awsEc2query_deserializeOpCreateTransitGatewayConnect struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayConnect) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayConnect) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12366,9 +12167,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPrefixListReference(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayConnect(response, &metadata) } - output := &CreateTransitGatewayPrefixListReferenceOutput{} + output := &CreateTransitGatewayConnectOutput{} out.Result = output var buff [1024]byte @@ -12389,7 +12190,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPrefixListReferenceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayConnectOutput(&output, 
decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12403,7 +12204,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayConnect(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12436,14 +12237,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayPrefixListReference(respo } } -type awsEc2query_deserializeOpCreateTransitGatewayRoute struct { +type awsEc2query_deserializeOpCreateTransitGatewayConnectPeer struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayConnectPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12461,9 +12262,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRoute) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayConnectPeer(response, &metadata) } - output := &CreateTransitGatewayRouteOutput{} + output := &CreateTransitGatewayConnectPeerOutput{} out.Result = output var buff [1024]byte @@ -12484,7 +12285,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRoute) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayConnectPeerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12498,7 +12299,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRoute) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayConnectPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12531,14 +12332,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayRoute(response *smithyhtt } } -type awsEc2query_deserializeOpCreateTransitGatewayRouteTable struct { +type awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayRouteTable) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) ID() 
string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12556,9 +12357,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTable) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayMulticastDomain(response, &metadata) } - output := &CreateTransitGatewayRouteTableOutput{} + output := &CreateTransitGatewayMulticastDomainOutput{} out.Result = output var buff [1024]byte @@ -12579,7 +12380,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTable) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayMulticastDomainOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12593,7 +12394,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTable) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12626,14 +12427,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTable(response *smit } } -type awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement struct { +type awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12651,9 +12452,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTableAnnouncement(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPeeringAttachment(response, &metadata) } - output := &CreateTransitGatewayRouteTableAnnouncementOutput{} 
+ output := &CreateTransitGatewayPeeringAttachmentOutput{} out.Result = output var buff [1024]byte @@ -12674,7 +12475,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteTableAnnouncementOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPeeringAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12688,7 +12489,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTableAnnouncement(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12721,14 +12522,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTableAnnouncement(re } } -type awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment struct { +type awsEc2query_deserializeOpCreateTransitGatewayPolicyTable struct { } -func (*awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12746,9 +12547,9 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayVpcAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPolicyTable(response, &metadata) } - output := &CreateTransitGatewayVpcAttachmentOutput{} + output := &CreateTransitGatewayPolicyTableOutput{} out.Result = output var buff [1024]byte @@ -12769,7 +12570,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateTransitGatewayVpcAttachmentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPolicyTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12783,7 +12584,7 @@ func (m *awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12816,14 +12617,14 @@ func awsEc2query_deserializeOpErrorCreateTransitGatewayVpcAttachment(response *s } } -type awsEc2query_deserializeOpCreateVerifiedAccessEndpoint struct { +type awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference struct { } -func (*awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12841,9 +12642,9 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayPrefixListReference(response, &metadata) } - output := &CreateVerifiedAccessEndpointOutput{} + output := &CreateTransitGatewayPrefixListReferenceOutput{} out.Result = output var buff [1024]byte @@ -12864,7 +12665,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayPrefixListReferenceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12878,7 +12679,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12911,14 +12712,14 @@ func awsEc2query_deserializeOpErrorCreateVerifiedAccessEndpoint(response *smithy } } -type awsEc2query_deserializeOpCreateVerifiedAccessGroup struct { +type awsEc2query_deserializeOpCreateTransitGatewayRoute struct { } -func (*awsEc2query_deserializeOpCreateVerifiedAccessGroup) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, 
in) @@ -12936,9 +12737,9 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessGroup) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRoute(response, &metadata) } - output := &CreateVerifiedAccessGroupOutput{} + output := &CreateTransitGatewayRouteOutput{} out.Result = output var buff [1024]byte @@ -12959,7 +12760,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessGroup) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12973,7 +12774,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessGroup) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13006,14 +12807,14 @@ func awsEc2query_deserializeOpErrorCreateVerifiedAccessGroup(response *smithyhtt } } -type awsEc2query_deserializeOpCreateVerifiedAccessInstance struct { +type awsEc2query_deserializeOpCreateTransitGatewayRouteTable struct { } -func (*awsEc2query_deserializeOpCreateVerifiedAccessInstance) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13031,9 +12832,9 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessInstance) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessInstance(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTable(response, &metadata) } - output := &CreateVerifiedAccessInstanceOutput{} + output := &CreateTransitGatewayRouteTableOutput{} out.Result = output var buff [1024]byte @@ -13054,7 +12855,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessInstance) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessInstanceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13068,7 +12869,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessInstance) HandleDeserializ return out, metadata, err } -func 
awsEc2query_deserializeOpErrorCreateVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13101,14 +12902,14 @@ func awsEc2query_deserializeOpErrorCreateVerifiedAccessInstance(response *smithy } } -type awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider struct { +type awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement struct { } -func (*awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayRouteTableAnnouncement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13126,9 +12927,9 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessTrustProvider(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTableAnnouncement(response, &metadata) } - output := &CreateVerifiedAccessTrustProviderOutput{} + output := &CreateTransitGatewayRouteTableAnnouncementOutput{} out.Result = output var buff [1024]byte @@ -13149,7 +12950,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessTrustProviderOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayRouteTableAnnouncementOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13163,7 +12964,7 @@ func (m *awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayRouteTableAnnouncement(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13196,14 +12997,14 @@ func awsEc2query_deserializeOpErrorCreateVerifiedAccessTrustProvider(response *s } } -type awsEc2query_deserializeOpCreateVolume struct { +type awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment struct { } -func (*awsEc2query_deserializeOpCreateVolume) ID() string { +func (*awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVolume) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13221,9 +13022,9 @@ func (m *awsEc2query_deserializeOpCreateVolume) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVolume(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateTransitGatewayVpcAttachment(response, &metadata) } - output := &CreateVolumeOutput{} + output := &CreateTransitGatewayVpcAttachmentOutput{} out.Result = output var buff [1024]byte @@ -13244,7 +13045,7 @@ func (m *awsEc2query_deserializeOpCreateVolume) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVolumeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateTransitGatewayVpcAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13258,7 +13059,7 @@ func (m *awsEc2query_deserializeOpCreateVolume) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13291,14 +13092,14 @@ func awsEc2query_deserializeOpErrorCreateVolume(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpCreateVpc struct { +type awsEc2query_deserializeOpCreateVerifiedAccessEndpoint struct { } -func (*awsEc2query_deserializeOpCreateVpc) ID() string { +func (*awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13316,9 +13117,9 @@ func (m *awsEc2query_deserializeOpCreateVpc) HandleDeserialize(ctx context.Conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessEndpoint(response, &metadata) } - output := &CreateVpcOutput{} + output := &CreateVerifiedAccessEndpointOutput{} out.Result = output var buff [1024]byte @@ -13339,7 +13140,7 @@ func (m *awsEc2query_deserializeOpCreateVpc) HandleDeserialize(ctx context.Conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessEndpointOutput(&output, decoder) if err != nil { 
var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13353,7 +13154,7 @@ func (m *awsEc2query_deserializeOpCreateVpc) HandleDeserialize(ctx context.Conte return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13386,14 +13187,14 @@ func awsEc2query_deserializeOpErrorCreateVpc(response *smithyhttp.Response, meta } } -type awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion struct { +type awsEc2query_deserializeOpCreateVerifiedAccessGroup struct { } -func (*awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) ID() string { +func (*awsEc2query_deserializeOpCreateVerifiedAccessGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13411,9 +13212,9 @@ func (m *awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpcBlockPublicAccessExclusion(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessGroup(response, &metadata) } - output := &CreateVpcBlockPublicAccessExclusionOutput{} + output := &CreateVerifiedAccessGroupOutput{} out.Result = output var buff [1024]byte @@ -13434,7 +13235,7 @@ func (m *awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcBlockPublicAccessExclusionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13448,7 +13249,7 @@ func (m *awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13481,14 +13282,14 @@ func awsEc2query_deserializeOpErrorCreateVpcBlockPublicAccessExclusion(response } } -type awsEc2query_deserializeOpCreateVpcEndpoint struct { +type awsEc2query_deserializeOpCreateVerifiedAccessInstance struct { } -func (*awsEc2query_deserializeOpCreateVpcEndpoint) ID() string { +func (*awsEc2query_deserializeOpCreateVerifiedAccessInstance) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpCreateVpcEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13506,9 +13307,9 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpoint) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessInstance(response, &metadata) } - output := &CreateVpcEndpointOutput{} + output := &CreateVerifiedAccessInstanceOutput{} out.Result = output var buff [1024]byte @@ -13529,7 +13330,7 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpoint) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessInstanceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13543,7 +13344,7 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpoint) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpcEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13576,14 +13377,14 @@ func awsEc2query_deserializeOpErrorCreateVpcEndpoint(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification struct { +type awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider struct { } -func (*awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) ID() string { +func (*awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13601,9 +13402,9 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpointConnectionNotification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVerifiedAccessTrustProvider(response, &metadata) } - output := &CreateVpcEndpointConnectionNotificationOutput{} + output := &CreateVerifiedAccessTrustProviderOutput{} out.Result = output var buff [1024]byte @@ -13624,7 +13425,7 @@ func (m 
*awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcEndpointConnectionNotificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVerifiedAccessTrustProviderOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13638,7 +13439,7 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpcEndpointConnectionNotification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13671,14 +13472,14 @@ func awsEc2query_deserializeOpErrorCreateVpcEndpointConnectionNotification(respo } } -type awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration struct { +type awsEc2query_deserializeOpCreateVolume struct { } -func (*awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) ID() string { +func (*awsEc2query_deserializeOpCreateVolume) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13696,9 +13497,9 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpointServiceConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVolume(response, &metadata) } - output := &CreateVpcEndpointServiceConfigurationOutput{} + output := &CreateVolumeOutput{} out.Result = output var buff [1024]byte @@ -13719,7 +13520,7 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcEndpointServiceConfigurationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVolumeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13733,7 +13534,7 @@ func (m *awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpcEndpointServiceConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13766,14 +13567,14 @@ func awsEc2query_deserializeOpErrorCreateVpcEndpointServiceConfiguration(respons } } -type 
awsEc2query_deserializeOpCreateVpcPeeringConnection struct { +type awsEc2query_deserializeOpCreateVpc struct { } -func (*awsEc2query_deserializeOpCreateVpcPeeringConnection) ID() string { +func (*awsEc2query_deserializeOpCreateVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13791,9 +13592,9 @@ func (m *awsEc2query_deserializeOpCreateVpcPeeringConnection) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpcPeeringConnection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpc(response, &metadata) } - output := &CreateVpcPeeringConnectionOutput{} + output := &CreateVpcOutput{} out.Result = output var buff [1024]byte @@ -13814,7 +13615,7 @@ func (m *awsEc2query_deserializeOpCreateVpcPeeringConnection) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpcPeeringConnectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13828,7 +13629,7 @@ func (m *awsEc2query_deserializeOpCreateVpcPeeringConnection) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13861,14 +13662,14 @@ func awsEc2query_deserializeOpErrorCreateVpcPeeringConnection(response *smithyht } } -type awsEc2query_deserializeOpCreateVpnConnection struct { +type awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion struct { } -func (*awsEc2query_deserializeOpCreateVpnConnection) ID() string { +func (*awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13886,9 +13687,9 @@ func (m *awsEc2query_deserializeOpCreateVpnConnection) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpnConnection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpcBlockPublicAccessExclusion(response, &metadata) } - output := &CreateVpnConnectionOutput{} + output := 
&CreateVpcBlockPublicAccessExclusionOutput{} out.Result = output var buff [1024]byte @@ -13909,7 +13710,7 @@ func (m *awsEc2query_deserializeOpCreateVpnConnection) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpnConnectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcBlockPublicAccessExclusionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13923,79 +13724,7 @@ func (m *awsEc2query_deserializeOpCreateVpnConnection) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpCreateVpnConnectionRoute struct { -} - -func (*awsEc2query_deserializeOpCreateVpnConnectionRoute) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpCreateVpnConnectionRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpnConnectionRoute(response, &metadata) - } - output := &CreateVpnConnectionRouteOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorCreateVpnConnectionRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14028,14 +13757,14 @@ func awsEc2query_deserializeOpErrorCreateVpnConnectionRoute(response *smithyhttp } } 
-type awsEc2query_deserializeOpCreateVpnGateway struct { +type awsEc2query_deserializeOpCreateVpcEndpoint struct { } -func (*awsEc2query_deserializeOpCreateVpnGateway) ID() string { +func (*awsEc2query_deserializeOpCreateVpcEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpCreateVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpcEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14053,9 +13782,9 @@ func (m *awsEc2query_deserializeOpCreateVpnGateway) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorCreateVpnGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpoint(response, &metadata) } - output := &CreateVpnGatewayOutput{} + output := &CreateVpcEndpointOutput{} out.Result = output var buff [1024]byte @@ -14076,7 +13805,7 @@ func (m *awsEc2query_deserializeOpCreateVpnGateway) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentCreateVpnGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14090,7 +13819,7 @@ func (m *awsEc2query_deserializeOpCreateVpnGateway) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorCreateVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpcEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14123,14 +13852,14 @@ func awsEc2query_deserializeOpErrorCreateVpnGateway(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDeleteCarrierGateway struct { +type awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification struct { } -func (*awsEc2query_deserializeOpDeleteCarrierGateway) ID() string { +func (*awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteCarrierGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpcEndpointConnectionNotification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14148,9 +13877,9 @@ func (m *awsEc2query_deserializeOpDeleteCarrierGateway) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteCarrierGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpointConnectionNotification(response, &metadata) } - output := &DeleteCarrierGatewayOutput{} + output := 
&CreateVpcEndpointConnectionNotificationOutput{} out.Result = output var buff [1024]byte @@ -14171,7 +13900,7 @@ func (m *awsEc2query_deserializeOpDeleteCarrierGateway) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteCarrierGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcEndpointConnectionNotificationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14185,7 +13914,7 @@ func (m *awsEc2query_deserializeOpDeleteCarrierGateway) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteCarrierGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpcEndpointConnectionNotification(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14218,14 +13947,14 @@ func awsEc2query_deserializeOpErrorDeleteCarrierGateway(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeleteClientVpnEndpoint struct { +type awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration struct { } -func (*awsEc2query_deserializeOpDeleteClientVpnEndpoint) ID() string { +func (*awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteClientVpnEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpcEndpointServiceConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14243,9 +13972,9 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnEndpoint) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteClientVpnEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpcEndpointServiceConfiguration(response, &metadata) } - output := &DeleteClientVpnEndpointOutput{} + output := &CreateVpcEndpointServiceConfigurationOutput{} out.Result = output var buff [1024]byte @@ -14266,7 +13995,7 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnEndpoint) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteClientVpnEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcEndpointServiceConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14280,7 +14009,7 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnEndpoint) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteClientVpnEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpcEndpointServiceConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed 
to copy error response body, %w", err)} @@ -14313,14 +14042,14 @@ func awsEc2query_deserializeOpErrorDeleteClientVpnEndpoint(response *smithyhttp. } } -type awsEc2query_deserializeOpDeleteClientVpnRoute struct { +type awsEc2query_deserializeOpCreateVpcPeeringConnection struct { } -func (*awsEc2query_deserializeOpDeleteClientVpnRoute) ID() string { +func (*awsEc2query_deserializeOpCreateVpcPeeringConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteClientVpnRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14338,9 +14067,9 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnRoute) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteClientVpnRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpcPeeringConnection(response, &metadata) } - output := &DeleteClientVpnRouteOutput{} + output := &CreateVpcPeeringConnectionOutput{} out.Result = output var buff [1024]byte @@ -14361,7 +14090,7 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnRoute) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteClientVpnRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpcPeeringConnectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14375,7 +14104,7 @@ func (m *awsEc2query_deserializeOpDeleteClientVpnRoute) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteClientVpnRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14408,14 +14137,14 @@ func awsEc2query_deserializeOpErrorDeleteClientVpnRoute(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeleteCoipCidr struct { +type awsEc2query_deserializeOpCreateVpnConnection struct { } -func (*awsEc2query_deserializeOpDeleteCoipCidr) ID() string { +func (*awsEc2query_deserializeOpCreateVpnConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteCoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14433,9 +14162,9 @@ func (m *awsEc2query_deserializeOpDeleteCoipCidr) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteCoipCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpnConnection(response, &metadata) } - output := &DeleteCoipCidrOutput{} + output := &CreateVpnConnectionOutput{} out.Result = output var buff [1024]byte @@ -14456,7 +14185,7 @@ func (m *awsEc2query_deserializeOpDeleteCoipCidr) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteCoipCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentCreateVpnConnectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14470,7 +14199,7 @@ func (m *awsEc2query_deserializeOpDeleteCoipCidr) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteCoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14503,14 +14232,14 @@ func awsEc2query_deserializeOpErrorDeleteCoipCidr(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteCoipPool struct { +type awsEc2query_deserializeOpCreateVpnConnectionRoute struct { } -func (*awsEc2query_deserializeOpDeleteCoipPool) ID() string { +func (*awsEc2query_deserializeOpCreateVpnConnectionRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteCoipPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpnConnectionRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14528,44 +14257,21 @@ func (m *awsEc2query_deserializeOpDeleteCoipPool) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteCoipPool(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpnConnectionRoute(response, &metadata) } - output := &DeleteCoipPoolOutput{} + output := &CreateVpnConnectionRouteOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteCoipPoolOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteCoipPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpnConnectionRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14598,14 +14304,14 @@ func awsEc2query_deserializeOpErrorDeleteCoipPool(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteCustomerGateway struct { +type awsEc2query_deserializeOpCreateVpnGateway struct { } -func (*awsEc2query_deserializeOpDeleteCustomerGateway) ID() string { +func (*awsEc2query_deserializeOpCreateVpnGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteCustomerGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpCreateVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14623,93 +14329,44 @@ func (m *awsEc2query_deserializeOpDeleteCustomerGateway) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteCustomerGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorCreateVpnGateway(response, &metadata) } - output := &DeleteCustomerGatewayOutput{} + output := &CreateVpnGatewayOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorDeleteCustomerGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err 
:= io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } - return genericError - } -} - -type awsEc2query_deserializeOpDeleteDhcpOptions struct { -} - -func (*awsEc2query_deserializeOpDeleteDhcpOptions) ID() string { - return "OperationDeserializer" -} -func (m *awsEc2query_deserializeOpDeleteDhcpOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentCreateVpnGatewayOutput(&output, decoder) if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteDhcpOptions(response, &metadata) - } - output := &DeleteDhcpOptionsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteDhcpOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorCreateVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14742,14 +14399,14 @@ func awsEc2query_deserializeOpErrorDeleteDhcpOptions(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway struct { 
+type awsEc2query_deserializeOpDeleteCarrierGateway struct { } -func (*awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) ID() string { +func (*awsEc2query_deserializeOpDeleteCarrierGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteCarrierGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14767,9 +14424,9 @@ func (m *awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteEgressOnlyInternetGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteCarrierGateway(response, &metadata) } - output := &DeleteEgressOnlyInternetGatewayOutput{} + output := &DeleteCarrierGatewayOutput{} out.Result = output var buff [1024]byte @@ -14790,7 +14447,7 @@ func (m *awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteEgressOnlyInternetGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteCarrierGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14804,7 +14461,7 @@ func (m *awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteEgressOnlyInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteCarrierGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14837,14 +14494,14 @@ func awsEc2query_deserializeOpErrorDeleteEgressOnlyInternetGateway(response *smi } } -type awsEc2query_deserializeOpDeleteFleets struct { +type awsEc2query_deserializeOpDeleteClientVpnEndpoint struct { } -func (*awsEc2query_deserializeOpDeleteFleets) ID() string { +func (*awsEc2query_deserializeOpDeleteClientVpnEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteClientVpnEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14862,9 +14519,9 @@ func (m *awsEc2query_deserializeOpDeleteFleets) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteFleets(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteClientVpnEndpoint(response, &metadata) } - output := &DeleteFleetsOutput{} + output := &DeleteClientVpnEndpointOutput{} out.Result = output var buff 
[1024]byte @@ -14885,7 +14542,7 @@ func (m *awsEc2query_deserializeOpDeleteFleets) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteFleetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteClientVpnEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14899,7 +14556,7 @@ func (m *awsEc2query_deserializeOpDeleteFleets) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteClientVpnEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14932,14 +14589,14 @@ func awsEc2query_deserializeOpErrorDeleteFleets(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDeleteFlowLogs struct { +type awsEc2query_deserializeOpDeleteClientVpnRoute struct { } -func (*awsEc2query_deserializeOpDeleteFlowLogs) ID() string { +func (*awsEc2query_deserializeOpDeleteClientVpnRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteClientVpnRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14957,9 +14614,9 @@ func (m *awsEc2query_deserializeOpDeleteFlowLogs) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteFlowLogs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteClientVpnRoute(response, &metadata) } - output := &DeleteFlowLogsOutput{} + output := &DeleteClientVpnRouteOutput{} out.Result = output var buff [1024]byte @@ -14980,7 +14637,7 @@ func (m *awsEc2query_deserializeOpDeleteFlowLogs) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteFlowLogsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteClientVpnRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14994,7 +14651,7 @@ func (m *awsEc2query_deserializeOpDeleteFlowLogs) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteClientVpnRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15027,14 +14684,14 @@ func awsEc2query_deserializeOpErrorDeleteFlowLogs(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteFpgaImage struct { +type awsEc2query_deserializeOpDeleteCoipCidr struct { } -func (*awsEc2query_deserializeOpDeleteFpgaImage) ID() string { +func (*awsEc2query_deserializeOpDeleteCoipCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteFpgaImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteCoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15052,9 +14709,9 @@ func (m *awsEc2query_deserializeOpDeleteFpgaImage) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteFpgaImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteCoipCidr(response, &metadata) } - output := &DeleteFpgaImageOutput{} + output := &DeleteCoipCidrOutput{} out.Result = output var buff [1024]byte @@ -15075,7 +14732,7 @@ func (m *awsEc2query_deserializeOpDeleteFpgaImage) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteFpgaImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteCoipCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15089,7 +14746,7 @@ func (m *awsEc2query_deserializeOpDeleteFpgaImage) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteFpgaImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteCoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15122,14 +14779,14 @@ func awsEc2query_deserializeOpErrorDeleteFpgaImage(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDeleteInstanceConnectEndpoint struct { +type awsEc2query_deserializeOpDeleteCoipPool struct { } -func (*awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) ID() string { +func (*awsEc2query_deserializeOpDeleteCoipPool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteCoipPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, 
) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15147,9 +14804,9 @@ func (m *awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteInstanceConnectEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteCoipPool(response, &metadata) } - output := &DeleteInstanceConnectEndpointOutput{} + output := &DeleteCoipPoolOutput{} out.Result = output var buff [1024]byte @@ -15170,7 +14827,7 @@ func (m *awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteInstanceConnectEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteCoipPoolOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15184,7 +14841,7 @@ func (m *awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteInstanceConnectEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteCoipPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15217,14 +14874,14 @@ func awsEc2query_deserializeOpErrorDeleteInstanceConnectEndpoint(response *smith } } -type awsEc2query_deserializeOpDeleteInstanceEventWindow struct { +type awsEc2query_deserializeOpDeleteCustomerGateway struct { } -func (*awsEc2query_deserializeOpDeleteInstanceEventWindow) ID() string { +func (*awsEc2query_deserializeOpDeleteCustomerGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteCustomerGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15242,44 +14899,21 @@ func (m *awsEc2query_deserializeOpDeleteInstanceEventWindow) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteInstanceEventWindow(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteCustomerGateway(response, &metadata) } - output := &DeleteInstanceEventWindowOutput{} + output := &DeleteCustomerGatewayOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDeleteInstanceEventWindowOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteCustomerGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15312,14 +14946,14 @@ func awsEc2query_deserializeOpErrorDeleteInstanceEventWindow(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteInternetGateway struct { +type awsEc2query_deserializeOpDeleteDhcpOptions struct { } -func (*awsEc2query_deserializeOpDeleteInternetGateway) ID() string { +func (*awsEc2query_deserializeOpDeleteDhcpOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteDhcpOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15337,9 +14971,9 @@ func (m *awsEc2query_deserializeOpDeleteInternetGateway) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteInternetGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteDhcpOptions(response, &metadata) } - output := &DeleteInternetGatewayOutput{} + output := &DeleteDhcpOptionsOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -15351,7 +14985,7 @@ func (m *awsEc2query_deserializeOpDeleteInternetGateway) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteDhcpOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15384,14 +15018,14 @@ func awsEc2query_deserializeOpErrorDeleteInternetGateway(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDeleteIpam struct { +type awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway struct { } -func (*awsEc2query_deserializeOpDeleteIpam) ID() string { +func (*awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteEgressOnlyInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15409,9 +15043,9 @@ func (m *awsEc2query_deserializeOpDeleteIpam) HandleDeserialize(ctx context.Cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteIpam(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteEgressOnlyInternetGateway(response, &metadata) } - output := &DeleteIpamOutput{} + output := &DeleteEgressOnlyInternetGatewayOutput{} out.Result = output var buff [1024]byte @@ -15432,7 +15066,7 @@ func (m *awsEc2query_deserializeOpDeleteIpam) HandleDeserialize(ctx context.Cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteIpamOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteEgressOnlyInternetGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15446,7 +15080,7 @@ func (m *awsEc2query_deserializeOpDeleteIpam) HandleDeserialize(ctx context.Cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteEgressOnlyInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15479,14 +15113,14 @@ func awsEc2query_deserializeOpErrorDeleteIpam(response *smithyhttp.Response, met } } -type awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken struct { +type awsEc2query_deserializeOpDeleteFleets struct { } -func (*awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) ID() string { +func (*awsEc2query_deserializeOpDeleteFleets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15504,9 +15138,9 @@ func (m *awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamExternalResourceVerificationToken(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteFleets(response, &metadata) } - output := &DeleteIpamExternalResourceVerificationTokenOutput{} + output := &DeleteFleetsOutput{} out.Result = output var buff [1024]byte @@ -15527,7 +15161,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteIpamExternalResourceVerificationTokenOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteFleetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15541,7 +15175,7 @@ func (m 
*awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) H return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteIpamExternalResourceVerificationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15574,14 +15208,14 @@ func awsEc2query_deserializeOpErrorDeleteIpamExternalResourceVerificationToken(r } } -type awsEc2query_deserializeOpDeleteIpamPool struct { +type awsEc2query_deserializeOpDeleteFlowLogs struct { } -func (*awsEc2query_deserializeOpDeleteIpamPool) ID() string { +func (*awsEc2query_deserializeOpDeleteFlowLogs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15599,9 +15233,9 @@ func (m *awsEc2query_deserializeOpDeleteIpamPool) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamPool(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteFlowLogs(response, &metadata) } - output := &DeleteIpamPoolOutput{} + output := &DeleteFlowLogsOutput{} out.Result = output var buff [1024]byte @@ -15622,7 +15256,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamPool) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteIpamPoolOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteFlowLogsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15636,7 +15270,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamPool) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteIpamPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15669,14 +15303,14 @@ func awsEc2query_deserializeOpErrorDeleteIpamPool(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteIpamResourceDiscovery struct { +type awsEc2query_deserializeOpDeleteFpgaImage struct { } -func (*awsEc2query_deserializeOpDeleteIpamResourceDiscovery) ID() string { +func (*awsEc2query_deserializeOpDeleteFpgaImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteFpgaImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15694,9 +15328,9 @@ func (m *awsEc2query_deserializeOpDeleteIpamResourceDiscovery) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamResourceDiscovery(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteFpgaImage(response, &metadata) } - output := &DeleteIpamResourceDiscoveryOutput{} + output := &DeleteFpgaImageOutput{} out.Result = output var buff [1024]byte @@ -15717,7 +15351,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamResourceDiscovery) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteIpamResourceDiscoveryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteFpgaImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15731,7 +15365,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamResourceDiscovery) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteFpgaImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15764,14 +15398,14 @@ func awsEc2query_deserializeOpErrorDeleteIpamResourceDiscovery(response *smithyh } } -type awsEc2query_deserializeOpDeleteIpamScope struct { +type awsEc2query_deserializeOpDeleteImageUsageReport struct { } -func (*awsEc2query_deserializeOpDeleteIpamScope) ID() string { +func (*awsEc2query_deserializeOpDeleteImageUsageReport) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteImageUsageReport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15789,9 +15423,9 @@ func (m *awsEc2query_deserializeOpDeleteIpamScope) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamScope(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteImageUsageReport(response, &metadata) } - output := &DeleteIpamScopeOutput{} + output := &DeleteImageUsageReportOutput{} out.Result = output var buff [1024]byte @@ -15812,7 +15446,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamScope) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteIpamScopeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteImageUsageReportOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15826,7 +15460,7 @@ func (m *awsEc2query_deserializeOpDeleteIpamScope) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteImageUsageReport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15859,14 +15493,14 @@ func awsEc2query_deserializeOpErrorDeleteIpamScope(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDeleteKeyPair struct { +type awsEc2query_deserializeOpDeleteInstanceConnectEndpoint struct { } -func (*awsEc2query_deserializeOpDeleteKeyPair) ID() string { +func (*awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteInstanceConnectEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15884,9 +15518,9 @@ func (m *awsEc2query_deserializeOpDeleteKeyPair) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteKeyPair(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteInstanceConnectEndpoint(response, &metadata) } - output := &DeleteKeyPairOutput{} + output := &DeleteInstanceConnectEndpointOutput{} out.Result = output var buff [1024]byte @@ -15907,7 +15541,7 @@ func (m *awsEc2query_deserializeOpDeleteKeyPair) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteKeyPairOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteInstanceConnectEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -15921,7 +15555,7 @@ func (m *awsEc2query_deserializeOpDeleteKeyPair) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteKeyPair(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteInstanceConnectEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -15954,14 +15588,14 @@ func awsEc2query_deserializeOpErrorDeleteKeyPair(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteLaunchTemplate struct { +type awsEc2query_deserializeOpDeleteInstanceEventWindow struct { } -func (*awsEc2query_deserializeOpDeleteLaunchTemplate) ID() string { +func (*awsEc2query_deserializeOpDeleteInstanceEventWindow) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -15979,9 +15613,9 @@ func (m *awsEc2query_deserializeOpDeleteLaunchTemplate) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLaunchTemplate(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteInstanceEventWindow(response, &metadata) } - output := &DeleteLaunchTemplateOutput{} + output := &DeleteInstanceEventWindowOutput{} out.Result = output var buff [1024]byte @@ -16002,7 +15636,7 @@ func (m *awsEc2query_deserializeOpDeleteLaunchTemplate) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLaunchTemplateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteInstanceEventWindowOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16016,7 +15650,7 @@ func (m *awsEc2query_deserializeOpDeleteLaunchTemplate) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16049,14 +15683,14 @@ func awsEc2query_deserializeOpErrorDeleteLaunchTemplate(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeleteLaunchTemplateVersions struct { +type awsEc2query_deserializeOpDeleteInternetGateway struct { } -func (*awsEc2query_deserializeOpDeleteLaunchTemplateVersions) ID() string { +func (*awsEc2query_deserializeOpDeleteInternetGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLaunchTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16074,44 +15708,21 @@ func (m *awsEc2query_deserializeOpDeleteLaunchTemplateVersions) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLaunchTemplateVersions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteInternetGateway(response, &metadata) } - output := &DeleteLaunchTemplateVersionsOutput{} + output := &DeleteInternetGatewayOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLaunchTemplateVersionsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLaunchTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16144,14 +15755,14 @@ func awsEc2query_deserializeOpErrorDeleteLaunchTemplateVersions(response *smithy } } -type awsEc2query_deserializeOpDeleteLocalGatewayRoute struct { +type awsEc2query_deserializeOpDeleteIpam struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpDeleteIpam) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16169,9 +15780,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRoute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteIpam(response, &metadata) } - output := &DeleteLocalGatewayRouteOutput{} + output := &DeleteIpamOutput{} out.Result = output var buff [1024]byte @@ -16192,7 +15803,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRoute) HandleDeserialize(ctx } decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteIpamOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16206,7 +15817,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRoute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16239,14 +15850,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayRoute(response *smithyhttp. } } -type awsEc2query_deserializeOpDeleteLocalGatewayRouteTable struct { +type awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) ID() string { +func (*awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteIpamExternalResourceVerificationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16264,9 +15875,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamExternalResourceVerificationToken(response, &metadata) } - output := &DeleteLocalGatewayRouteTableOutput{} + output := &DeleteIpamExternalResourceVerificationTokenOutput{} out.Result = output var buff [1024]byte @@ -16287,7 +15898,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteIpamExternalResourceVerificationTokenOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16301,7 +15912,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteIpamExternalResourceVerificationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16334,14 +15945,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTable(response *smithy } } -type 
awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { +type awsEc2query_deserializeOpDeleteIpamPool struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation) ID() string { +func (*awsEc2query_deserializeOpDeleteIpamPool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16359,9 +15970,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGr } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamPool(response, &metadata) } - output := &DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput{} + output := &DeleteIpamPoolOutput{} out.Result = output var buff [1024]byte @@ -16382,7 +15993,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGr } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteIpamPoolOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16396,7 +16007,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGr return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteIpamPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16429,14 +16040,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVirtualInterfaceG } } -type awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation struct { +type awsEc2query_deserializeOpDeleteIpamResourceDiscovery struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) ID() string { +func (*awsEc2query_deserializeOpDeleteIpamResourceDiscovery) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16454,9 +16065,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) Ha } if response.StatusCode < 200 
|| response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVpcAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamResourceDiscovery(response, &metadata) } - output := &DeleteLocalGatewayRouteTableVpcAssociationOutput{} + output := &DeleteIpamResourceDiscoveryOutput{} out.Result = output var buff [1024]byte @@ -16477,7 +16088,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableVpcAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteIpamResourceDiscoveryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16491,7 +16102,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVpcAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16524,14 +16135,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVpcAssociation(re } } -type awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface struct { +type awsEc2query_deserializeOpDeleteIpamScope struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) ID() string { +func (*awsEc2query_deserializeOpDeleteIpamScope) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16549,9 +16160,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteIpamScope(response, &metadata) } - output := &DeleteLocalGatewayVirtualInterfaceOutput{} + output := &DeleteIpamScopeOutput{} out.Result = output var buff [1024]byte @@ -16572,7 +16183,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayVirtualInterfaceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteIpamScopeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16586,7 +16197,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDeleteIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16619,14 +16230,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterface(response * } } -type awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup struct { +type awsEc2query_deserializeOpDeleteKeyPair struct { } -func (*awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) ID() string { +func (*awsEc2query_deserializeOpDeleteKeyPair) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16644,9 +16255,9 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterfaceGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteKeyPair(response, &metadata) } - output := &DeleteLocalGatewayVirtualInterfaceGroupOutput{} + output := &DeleteKeyPairOutput{} out.Result = output var buff [1024]byte @@ -16667,7 +16278,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayVirtualInterfaceGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteKeyPairOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16681,7 +16292,7 @@ func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterfaceGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16714,14 +16325,14 @@ func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterfaceGroup(respo } } -type awsEc2query_deserializeOpDeleteManagedPrefixList struct { +type awsEc2query_deserializeOpDeleteLaunchTemplate struct { } -func (*awsEc2query_deserializeOpDeleteManagedPrefixList) ID() string { +func (*awsEc2query_deserializeOpDeleteLaunchTemplate) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteManagedPrefixList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err 
error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16739,9 +16350,9 @@ func (m *awsEc2query_deserializeOpDeleteManagedPrefixList) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteManagedPrefixList(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLaunchTemplate(response, &metadata) } - output := &DeleteManagedPrefixListOutput{} + output := &DeleteLaunchTemplateOutput{} out.Result = output var buff [1024]byte @@ -16762,7 +16373,7 @@ func (m *awsEc2query_deserializeOpDeleteManagedPrefixList) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteManagedPrefixListOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteLaunchTemplateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16776,7 +16387,7 @@ func (m *awsEc2query_deserializeOpDeleteManagedPrefixList) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16809,14 +16420,14 @@ func awsEc2query_deserializeOpErrorDeleteManagedPrefixList(response *smithyhttp. } } -type awsEc2query_deserializeOpDeleteNatGateway struct { +type awsEc2query_deserializeOpDeleteLaunchTemplateVersions struct { } -func (*awsEc2query_deserializeOpDeleteNatGateway) ID() string { +func (*awsEc2query_deserializeOpDeleteLaunchTemplateVersions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNatGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLaunchTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16834,9 +16445,9 @@ func (m *awsEc2query_deserializeOpDeleteNatGateway) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNatGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLaunchTemplateVersions(response, &metadata) } - output := &DeleteNatGatewayOutput{} + output := &DeleteLaunchTemplateVersionsOutput{} out.Result = output var buff [1024]byte @@ -16857,7 +16468,7 @@ func (m *awsEc2query_deserializeOpDeleteNatGateway) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNatGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteLaunchTemplateVersionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16871,7 +16482,7 @@ func (m *awsEc2query_deserializeOpDeleteNatGateway) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNatGateway(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLaunchTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16904,14 +16515,14 @@ func awsEc2query_deserializeOpErrorDeleteNatGateway(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDeleteNetworkAcl struct { +type awsEc2query_deserializeOpDeleteLocalGatewayRoute struct { } -func (*awsEc2query_deserializeOpDeleteNetworkAcl) ID() string { +func (*awsEc2query_deserializeOpDeleteLocalGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16929,21 +16540,44 @@ func (m *awsEc2query_deserializeOpDeleteNetworkAcl) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkAcl(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRoute(response, &metadata) } - output := &DeleteNetworkAclOutput{} + output := &DeleteLocalGatewayRouteOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16976,14 +16610,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkAcl(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDeleteNetworkAclEntry struct { +type awsEc2query_deserializeOpDeleteLocalGatewayRouteTable struct { } -func (*awsEc2query_deserializeOpDeleteNetworkAclEntry) ID() string { +func 
(*awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17001,21 +16635,44 @@ func (m *awsEc2query_deserializeOpDeleteNetworkAclEntry) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkAclEntry(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTable(response, &metadata) } - output := &DeleteNetworkAclEntryOutput{} + output := &DeleteLocalGatewayRouteTableOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17048,14 +16705,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkAclEntry(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope struct { +type awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { } -func (*awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) ID() string { +func (*awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err 
= next.HandleDeserialize(ctx, in) @@ -17073,9 +16730,9 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScope(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response, &metadata) } - output := &DeleteNetworkInsightsAccessScopeOutput{} + output := &DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput{} out.Result = output var buff [1024]byte @@ -17096,7 +16753,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAccessScopeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17110,7 +16767,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17143,14 +16800,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScope(response *sm } } -type awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis struct { +type awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation struct { } -func (*awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) ID() string { +func (*awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayRouteTableVpcAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17168,9 +16825,9 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScopeAnalysis(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVpcAssociation(response, &metadata) } - output := &DeleteNetworkInsightsAccessScopeAnalysisOutput{} + output := &DeleteLocalGatewayRouteTableVpcAssociationOutput{} out.Result = output var buff [1024]byte @@ -17191,7 +16848,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAccessScopeAnalysisOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDeleteLocalGatewayRouteTableVpcAssociationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17205,7 +16862,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScopeAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayRouteTableVpcAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17238,14 +16895,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScopeAnalysis(resp } } -type awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis struct { +type awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface struct { } -func (*awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) ID() string { +func (*awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17263,9 +16920,9 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAnalysis(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterface(response, &metadata) } - output := &DeleteNetworkInsightsAnalysisOutput{} + output := &DeleteLocalGatewayVirtualInterfaceOutput{} out.Result = output var buff [1024]byte @@ -17286,7 +16943,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAnalysisOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayVirtualInterfaceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17300,7 +16957,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17333,14 +16990,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAnalysis(response *smith } } -type awsEc2query_deserializeOpDeleteNetworkInsightsPath struct { +type awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup struct { 
} -func (*awsEc2query_deserializeOpDeleteNetworkInsightsPath) ID() string { +func (*awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInsightsPath) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteLocalGatewayVirtualInterfaceGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17358,9 +17015,9 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsPath) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsPath(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterfaceGroup(response, &metadata) } - output := &DeleteNetworkInsightsPathOutput{} + output := &DeleteLocalGatewayVirtualInterfaceGroupOutput{} out.Result = output var buff [1024]byte @@ -17381,7 +17038,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsPath) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsPathOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteLocalGatewayVirtualInterfaceGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17395,7 +17052,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInsightsPath) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkInsightsPath(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteLocalGatewayVirtualInterfaceGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17428,14 +17085,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInsightsPath(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteNetworkInterface struct { +type awsEc2query_deserializeOpDeleteManagedPrefixList struct { } -func (*awsEc2query_deserializeOpDeleteNetworkInterface) ID() string { +func (*awsEc2query_deserializeOpDeleteManagedPrefixList) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteManagedPrefixList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17453,21 +17110,44 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInterface) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteManagedPrefixList(response, &metadata) } - output := &DeleteNetworkInterfaceOutput{} + 
output := &DeleteManagedPrefixListOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteManagedPrefixListOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17500,14 +17180,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInterface(response *smithyhttp.R } } -type awsEc2query_deserializeOpDeleteNetworkInterfacePermission struct { +type awsEc2query_deserializeOpDeleteNatGateway struct { } -func (*awsEc2query_deserializeOpDeleteNetworkInterfacePermission) ID() string { +func (*awsEc2query_deserializeOpDeleteNatGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteNetworkInterfacePermission) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNatGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17525,9 +17205,9 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInterfacePermission) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInterfacePermission(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNatGateway(response, &metadata) } - output := &DeleteNetworkInterfacePermissionOutput{} + output := &DeleteNatGatewayOutput{} out.Result = output var buff [1024]byte @@ -17548,7 +17228,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInterfacePermission) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteNetworkInterfacePermissionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteNatGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17562,7 +17242,7 @@ func (m *awsEc2query_deserializeOpDeleteNetworkInterfacePermission) HandleDeseri return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDeleteNetworkInterfacePermission(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNatGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17595,14 +17275,14 @@ func awsEc2query_deserializeOpErrorDeleteNetworkInterfacePermission(response *sm } } -type awsEc2query_deserializeOpDeletePlacementGroup struct { +type awsEc2query_deserializeOpDeleteNetworkAcl struct { } -func (*awsEc2query_deserializeOpDeletePlacementGroup) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkAcl) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeletePlacementGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17620,9 +17300,9 @@ func (m *awsEc2query_deserializeOpDeletePlacementGroup) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeletePlacementGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkAcl(response, &metadata) } - output := &DeletePlacementGroupOutput{} + output := &DeleteNetworkAclOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -17634,7 +17314,7 @@ func (m *awsEc2query_deserializeOpDeletePlacementGroup) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeletePlacementGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17667,14 +17347,14 @@ func awsEc2query_deserializeOpErrorDeletePlacementGroup(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeletePublicIpv4Pool struct { +type awsEc2query_deserializeOpDeleteNetworkAclEntry struct { } -func (*awsEc2query_deserializeOpDeletePublicIpv4Pool) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkAclEntry) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeletePublicIpv4Pool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17692,44 +17372,21 @@ func (m *awsEc2query_deserializeOpDeletePublicIpv4Pool) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeletePublicIpv4Pool(response, &metadata) + return 
out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkAclEntry(response, &metadata) } - output := &DeletePublicIpv4PoolOutput{} + output := &DeleteNetworkAclEntryOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeletePublicIpv4PoolOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeletePublicIpv4Pool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17762,14 +17419,14 @@ func awsEc2query_deserializeOpErrorDeletePublicIpv4Pool(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeleteQueuedReservedInstances struct { +type awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope struct { } -func (*awsEc2query_deserializeOpDeleteQueuedReservedInstances) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteQueuedReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAccessScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17787,9 +17444,9 @@ func (m *awsEc2query_deserializeOpDeleteQueuedReservedInstances) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteQueuedReservedInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScope(response, &metadata) } - output := &DeleteQueuedReservedInstancesOutput{} + output := &DeleteNetworkInsightsAccessScopeOutput{} out.Result = output var buff [1024]byte @@ -17810,7 +17467,7 @@ func (m *awsEc2query_deserializeOpDeleteQueuedReservedInstances) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteQueuedReservedInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAccessScopeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, 
ringBuffer) @@ -17824,79 +17481,7 @@ func (m *awsEc2query_deserializeOpDeleteQueuedReservedInstances) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteQueuedReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpDeleteRoute struct { -} - -func (*awsEc2query_deserializeOpDeleteRoute) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpDeleteRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteRoute(response, &metadata) - } - output := &DeleteRouteOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorDeleteRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -17929,14 +17514,14 @@ func awsEc2query_deserializeOpErrorDeleteRoute(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpDeleteRouteServer struct { +type awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis struct { } -func (*awsEc2query_deserializeOpDeleteRouteServer) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpDeleteNetworkInsightsAccessScopeAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -17954,9 +17539,9 @@ func (m *awsEc2query_deserializeOpDeleteRouteServer) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScopeAnalysis(response, &metadata) } - output := &DeleteRouteServerOutput{} + output := &DeleteNetworkInsightsAccessScopeAnalysisOutput{} out.Result = output var buff [1024]byte @@ -17977,7 +17562,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServer) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteRouteServerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAccessScopeAnalysisOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -17991,7 +17576,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServer) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAccessScopeAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18024,14 +17609,14 @@ func awsEc2query_deserializeOpErrorDeleteRouteServer(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDeleteRouteServerEndpoint struct { +type awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis struct { } -func (*awsEc2query_deserializeOpDeleteRouteServerEndpoint) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteRouteServerEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkInsightsAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18049,9 +17634,9 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerEndpoint) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServerEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsAnalysis(response, &metadata) } - output := &DeleteRouteServerEndpointOutput{} + output := &DeleteNetworkInsightsAnalysisOutput{} out.Result = output var buff [1024]byte @@ -18072,7 +17657,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerEndpoint) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteRouteServerEndpointOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDeleteNetworkInsightsAnalysisOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -18086,7 +17671,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerEndpoint) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteRouteServerEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInsightsAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18119,14 +17704,14 @@ func awsEc2query_deserializeOpErrorDeleteRouteServerEndpoint(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteRouteServerPeer struct { +type awsEc2query_deserializeOpDeleteNetworkInsightsPath struct { } -func (*awsEc2query_deserializeOpDeleteRouteServerPeer) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInsightsPath) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteRouteServerPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkInsightsPath) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18144,9 +17729,9 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerPeer) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServerPeer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInsightsPath(response, &metadata) } - output := &DeleteRouteServerPeerOutput{} + output := &DeleteNetworkInsightsPathOutput{} out.Result = output var buff [1024]byte @@ -18167,7 +17752,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerPeer) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteRouteServerPeerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteNetworkInsightsPathOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -18181,7 +17766,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteServerPeer) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteRouteServerPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInsightsPath(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18214,14 +17799,14 @@ func awsEc2query_deserializeOpErrorDeleteRouteServerPeer(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDeleteRouteTable struct { +type awsEc2query_deserializeOpDeleteNetworkInterface struct { } -func (*awsEc2query_deserializeOpDeleteRouteTable) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInterface) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDeleteRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18239,9 +17824,9 @@ func (m *awsEc2query_deserializeOpDeleteRouteTable) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInterface(response, &metadata) } - output := &DeleteRouteTableOutput{} + output := &DeleteNetworkInterfaceOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -18253,7 +17838,7 @@ func (m *awsEc2query_deserializeOpDeleteRouteTable) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18286,14 +17871,14 @@ func awsEc2query_deserializeOpErrorDeleteRouteTable(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDeleteSecurityGroup struct { +type awsEc2query_deserializeOpDeleteNetworkInterfacePermission struct { } -func (*awsEc2query_deserializeOpDeleteSecurityGroup) ID() string { +func (*awsEc2query_deserializeOpDeleteNetworkInterfacePermission) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteSecurityGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteNetworkInterfacePermission) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18311,9 +17896,9 @@ func (m *awsEc2query_deserializeOpDeleteSecurityGroup) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteSecurityGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteNetworkInterfacePermission(response, &metadata) } - output := &DeleteSecurityGroupOutput{} + output := &DeleteNetworkInterfacePermissionOutput{} out.Result = output var buff [1024]byte @@ -18334,7 +17919,7 @@ func (m *awsEc2query_deserializeOpDeleteSecurityGroup) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteSecurityGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteNetworkInterfacePermissionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -18348,7 +17933,7 @@ func (m *awsEc2query_deserializeOpDeleteSecurityGroup) HandleDeserialize(ctx con return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDeleteSecurityGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteNetworkInterfacePermission(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18381,14 +17966,14 @@ func awsEc2query_deserializeOpErrorDeleteSecurityGroup(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDeleteSnapshot struct { +type awsEc2query_deserializeOpDeletePlacementGroup struct { } -func (*awsEc2query_deserializeOpDeleteSnapshot) ID() string { +func (*awsEc2query_deserializeOpDeletePlacementGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeletePlacementGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18406,9 +17991,9 @@ func (m *awsEc2query_deserializeOpDeleteSnapshot) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteSnapshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeletePlacementGroup(response, &metadata) } - output := &DeleteSnapshotOutput{} + output := &DeletePlacementGroupOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -18420,7 +18005,7 @@ func (m *awsEc2query_deserializeOpDeleteSnapshot) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeletePlacementGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18453,14 +18038,14 @@ func awsEc2query_deserializeOpErrorDeleteSnapshot(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDeleteSpotDatafeedSubscription struct { +type awsEc2query_deserializeOpDeletePublicIpv4Pool struct { } -func (*awsEc2query_deserializeOpDeleteSpotDatafeedSubscription) ID() string { +func (*awsEc2query_deserializeOpDeletePublicIpv4Pool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeletePublicIpv4Pool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18478,21 +18063,44 @@ func (m *awsEc2query_deserializeOpDeleteSpotDatafeedSubscription) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteSpotDatafeedSubscription(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeletePublicIpv4Pool(response, &metadata) } - output := &DeleteSpotDatafeedSubscriptionOutput{} + output := &DeletePublicIpv4PoolOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeletePublicIpv4PoolOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeletePublicIpv4Pool(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18525,14 +18133,14 @@ func awsEc2query_deserializeOpErrorDeleteSpotDatafeedSubscription(response *smit } } -type awsEc2query_deserializeOpDeleteSubnet struct { +type 
awsEc2query_deserializeOpDeleteQueuedReservedInstances struct { } -func (*awsEc2query_deserializeOpDeleteSubnet) ID() string { +func (*awsEc2query_deserializeOpDeleteQueuedReservedInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteSubnet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteQueuedReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18550,9 +18158,104 @@ func (m *awsEc2query_deserializeOpDeleteSubnet) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteSubnet(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteQueuedReservedInstances(response, &metadata) } - output := &DeleteSubnetOutput{} + output := &DeleteQueuedReservedInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteQueuedReservedInstancesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorDeleteQueuedReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpDeleteRoute struct { +} + +func (*awsEc2query_deserializeOpDeleteRoute) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpDeleteRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + 
return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorDeleteRoute(response, &metadata) + } + output := &DeleteRouteOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -18564,7 +18267,7 @@ func (m *awsEc2query_deserializeOpDeleteSubnet) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteSubnet(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18597,14 +18300,14 @@ func awsEc2query_deserializeOpErrorDeleteSubnet(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDeleteSubnetCidrReservation struct { +type awsEc2query_deserializeOpDeleteRouteServer struct { } -func (*awsEc2query_deserializeOpDeleteSubnetCidrReservation) ID() string { +func (*awsEc2query_deserializeOpDeleteRouteServer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteSubnetCidrReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18622,9 +18325,9 @@ func (m *awsEc2query_deserializeOpDeleteSubnetCidrReservation) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteSubnetCidrReservation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServer(response, &metadata) } - output := &DeleteSubnetCidrReservationOutput{} + output := &DeleteRouteServerOutput{} out.Result = output var buff [1024]byte @@ -18645,7 +18348,7 @@ func (m *awsEc2query_deserializeOpDeleteSubnetCidrReservation) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteSubnetCidrReservationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteRouteServerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -18659,7 +18362,7 @@ func (m *awsEc2query_deserializeOpDeleteSubnetCidrReservation) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteSubnetCidrReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -18692,14 +18395,14 @@ func awsEc2query_deserializeOpErrorDeleteSubnetCidrReservation(response *smithyh } } -type awsEc2query_deserializeOpDeleteTags struct { +type awsEc2query_deserializeOpDeleteRouteServerEndpoint struct { } -func (*awsEc2query_deserializeOpDeleteTags) ID() string { +func (*awsEc2query_deserializeOpDeleteRouteServerEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteRouteServerEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18717,21 +18420,44 @@ func (m *awsEc2query_deserializeOpDeleteTags) HandleDeserialize(ctx context.Cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTags(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServerEndpoint(response, &metadata) } - output := &DeleteTagsOutput{} + output := &DeleteRouteServerEndpointOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteRouteServerEndpointOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteRouteServerEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18764,14 +18490,14 @@ func awsEc2query_deserializeOpErrorDeleteTags(response *smithyhttp.Response, met } } -type awsEc2query_deserializeOpDeleteTrafficMirrorFilter struct { +type awsEc2query_deserializeOpDeleteRouteServerPeer struct { } -func (*awsEc2query_deserializeOpDeleteTrafficMirrorFilter) ID() string { +func (*awsEc2query_deserializeOpDeleteRouteServerPeer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilter) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteRouteServerPeer) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18789,9 +18515,9 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilter) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilter(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteServerPeer(response, &metadata) } - output := &DeleteTrafficMirrorFilterOutput{} + output := &DeleteRouteServerPeerOutput{} out.Result = output var buff [1024]byte @@ -18812,7 +18538,7 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilter) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorFilterOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteRouteServerPeerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -18826,7 +18552,7 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilter) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilter(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteRouteServerPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18859,14 +18585,14 @@ func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilter(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule struct { +type awsEc2query_deserializeOpDeleteRouteTable struct { } -func (*awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule) ID() string { +func (*awsEc2query_deserializeOpDeleteRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18884,44 +18610,21 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilterRule(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteRouteTable(response, &metadata) } - output := &DeleteTrafficMirrorFilterRuleOutput{} + output := &DeleteRouteTableOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode 
response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorFilterRuleOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilterRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -18954,14 +18657,14 @@ func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilterRule(response *smith } } -type awsEc2query_deserializeOpDeleteTrafficMirrorSession struct { +type awsEc2query_deserializeOpDeleteSecurityGroup struct { } -func (*awsEc2query_deserializeOpDeleteTrafficMirrorSession) ID() string { +func (*awsEc2query_deserializeOpDeleteSecurityGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteSecurityGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -18979,9 +18682,9 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorSession) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorSession(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteSecurityGroup(response, &metadata) } - output := &DeleteTrafficMirrorSessionOutput{} + output := &DeleteSecurityGroupOutput{} out.Result = output var buff [1024]byte @@ -19002,7 +18705,7 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorSession) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorSessionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteSecurityGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19016,7 +18719,7 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorSession) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteSecurityGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19049,14 +18752,14 @@ func awsEc2query_deserializeOpErrorDeleteTrafficMirrorSession(response *smithyht } } -type awsEc2query_deserializeOpDeleteTrafficMirrorTarget struct { 
+type awsEc2query_deserializeOpDeleteSnapshot struct { } -func (*awsEc2query_deserializeOpDeleteTrafficMirrorTarget) ID() string { +func (*awsEc2query_deserializeOpDeleteSnapshot) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTrafficMirrorTarget) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19074,44 +18777,21 @@ func (m *awsEc2query_deserializeOpDeleteTrafficMirrorTarget) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorTarget(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteSnapshot(response, &metadata) } - output := &DeleteTrafficMirrorTargetOutput{} + output := &DeleteSnapshotOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorTargetOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTrafficMirrorTarget(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19144,14 +18824,14 @@ func awsEc2query_deserializeOpErrorDeleteTrafficMirrorTarget(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteTransitGateway struct { +type awsEc2query_deserializeOpDeleteSpotDatafeedSubscription struct { } -func (*awsEc2query_deserializeOpDeleteTransitGateway) ID() string { +func (*awsEc2query_deserializeOpDeleteSpotDatafeedSubscription) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ 
-19169,44 +18849,93 @@ func (m *awsEc2query_deserializeOpDeleteTransitGateway) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteSpotDatafeedSubscription(response, &metadata) } - output := &DeleteTransitGatewayOutput{} + output := &DeleteSpotDatafeedSubscriptionOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } } - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayOutput(&output, decoder) + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorDeleteSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } + return genericError + + } +} + +type awsEc2query_deserializeOpDeleteSubnet struct { +} + +func (*awsEc2query_deserializeOpDeleteSubnet) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpDeleteSubnet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorDeleteSubnet(response, &metadata) + } + output := &DeleteSubnetOutput{} + out.Result = output + + if _, err = 
io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteSubnet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19239,14 +18968,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGateway(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeleteTransitGatewayConnect struct { +type awsEc2query_deserializeOpDeleteSubnetCidrReservation struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayConnect) ID() string { +func (*awsEc2query_deserializeOpDeleteSubnetCidrReservation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnect) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteSubnetCidrReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19264,9 +18993,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnect) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayConnect(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteSubnetCidrReservation(response, &metadata) } - output := &DeleteTransitGatewayConnectOutput{} + output := &DeleteSubnetCidrReservationOutput{} out.Result = output var buff [1024]byte @@ -19287,7 +19016,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnect) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayConnectOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteSubnetCidrReservationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19301,7 +19030,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnect) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnect(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteSubnetCidrReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19334,14 +19063,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnect(response *smithyh } } -type awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer struct { +type awsEc2query_deserializeOpDeleteTags struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer) ID() string { +func (*awsEc2query_deserializeOpDeleteTags) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19359,44 +19088,21 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayConnectPeer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTags(response, &metadata) } - output := &DeleteTransitGatewayConnectPeerOutput{} + output := &DeleteTagsOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayConnectPeerOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnectPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19429,14 +19135,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnectPeer(response *smi } } -type awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain struct { +type awsEc2query_deserializeOpDeleteTrafficMirrorFilter struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) ID() string { +func (*awsEc2query_deserializeOpDeleteTrafficMirrorFilter) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilter) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19454,9 +19160,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorDeleteTransitGatewayMulticastDomain(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilter(response, &metadata) } - output := &DeleteTransitGatewayMulticastDomainOutput{} + output := &DeleteTrafficMirrorFilterOutput{} out.Result = output var buff [1024]byte @@ -19477,7 +19183,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayMulticastDomainOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorFilterOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19491,7 +19197,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilter(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19524,14 +19230,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayMulticastDomain(response } } -type awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment struct { +type awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) ID() string { +func (*awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19549,9 +19255,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPeeringAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilterRule(response, &metadata) } - output := &DeleteTransitGatewayPeeringAttachmentOutput{} + output := &DeleteTrafficMirrorFilterRuleOutput{} out.Result = output var buff [1024]byte @@ -19572,7 +19278,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPeeringAttachmentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorFilterRuleOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19586,7 +19292,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
+func awsEc2query_deserializeOpErrorDeleteTrafficMirrorFilterRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19619,14 +19325,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayPeeringAttachment(respons } } -type awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable struct { +type awsEc2query_deserializeOpDeleteTrafficMirrorSession struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) ID() string { +func (*awsEc2query_deserializeOpDeleteTrafficMirrorSession) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19644,9 +19350,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPolicyTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorSession(response, &metadata) } - output := &DeleteTransitGatewayPolicyTableOutput{} + output := &DeleteTrafficMirrorSessionOutput{} out.Result = output var buff [1024]byte @@ -19667,7 +19373,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPolicyTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorSessionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19681,7 +19387,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19714,14 +19420,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayPolicyTable(response *smi } } -type awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference struct { +type awsEc2query_deserializeOpDeleteTrafficMirrorTarget struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) ID() string { +func (*awsEc2query_deserializeOpDeleteTrafficMirrorTarget) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTrafficMirrorTarget) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19739,9 +19445,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPrefixListReference(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTrafficMirrorTarget(response, &metadata) } - output := &DeleteTransitGatewayPrefixListReferenceOutput{} + output := &DeleteTrafficMirrorTargetOutput{} out.Result = output var buff [1024]byte @@ -19762,7 +19468,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPrefixListReferenceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTrafficMirrorTargetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19776,7 +19482,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTrafficMirrorTarget(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19809,14 +19515,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayPrefixListReference(respo } } -type awsEc2query_deserializeOpDeleteTransitGatewayRoute struct { +type awsEc2query_deserializeOpDeleteTransitGateway struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19834,9 +19540,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRoute) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGateway(response, &metadata) } - output := &DeleteTransitGatewayRouteOutput{} + output := &DeleteTransitGatewayOutput{} out.Result = output var buff [1024]byte @@ -19857,7 +19563,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRoute) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, 
ringBuffer) @@ -19871,7 +19577,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRoute) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19904,14 +19610,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayRoute(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteTransitGatewayRouteTable struct { +type awsEc2query_deserializeOpDeleteTransitGatewayConnect struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayConnect) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnect) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19929,9 +19635,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayConnect(response, &metadata) } - output := &DeleteTransitGatewayRouteTableOutput{} + output := &DeleteTransitGatewayConnectOutput{} out.Result = output var buff [1024]byte @@ -19952,7 +19658,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayConnectOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19966,7 +19672,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnect(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19999,14 +19705,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTable(response *smit } } -type awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement struct { +type awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayConnectPeer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20024,9 +19730,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTableAnnouncement(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayConnectPeer(response, &metadata) } - output := &DeleteTransitGatewayRouteTableAnnouncementOutput{} + output := &DeleteTransitGatewayConnectPeerOutput{} out.Result = output var buff [1024]byte @@ -20047,7 +19753,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteTableAnnouncementOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayConnectPeerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20061,7 +19767,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTableAnnouncement(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayConnectPeer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20094,14 +19800,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTableAnnouncement(re } } -type awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment struct { +type awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain struct { } -func (*awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20119,9 +19825,9 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayVpcAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayMulticastDomain(response, &metadata) } - output := &DeleteTransitGatewayVpcAttachmentOutput{} + output := &DeleteTransitGatewayMulticastDomainOutput{} 
out.Result = output var buff [1024]byte @@ -20142,7 +19848,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayVpcAttachmentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayMulticastDomainOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20156,7 +19862,7 @@ func (m *awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20189,14 +19895,14 @@ func awsEc2query_deserializeOpErrorDeleteTransitGatewayVpcAttachment(response *s } } -type awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint struct { +type awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment struct { } -func (*awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20214,9 +19920,9 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPeeringAttachment(response, &metadata) } - output := &DeleteVerifiedAccessEndpointOutput{} + output := &DeleteTransitGatewayPeeringAttachmentOutput{} out.Result = output var buff [1024]byte @@ -20237,7 +19943,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPeeringAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20251,7 +19957,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -20284,14 +19990,14 @@ func awsEc2query_deserializeOpErrorDeleteVerifiedAccessEndpoint(response *smithy } } -type awsEc2query_deserializeOpDeleteVerifiedAccessGroup struct { +type awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable struct { } -func (*awsEc2query_deserializeOpDeleteVerifiedAccessGroup) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20309,9 +20015,9 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessGroup) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPolicyTable(response, &metadata) } - output := &DeleteVerifiedAccessGroupOutput{} + output := &DeleteTransitGatewayPolicyTableOutput{} out.Result = output var buff [1024]byte @@ -20332,7 +20038,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessGroup) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPolicyTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20346,7 +20052,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessGroup) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20379,14 +20085,14 @@ func awsEc2query_deserializeOpErrorDeleteVerifiedAccessGroup(response *smithyhtt } } -type awsEc2query_deserializeOpDeleteVerifiedAccessInstance struct { +type awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference struct { } -func (*awsEc2query_deserializeOpDeleteVerifiedAccessInstance) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20404,9 +20110,9 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessInstance) 
HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessInstance(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayPrefixListReference(response, &metadata) } - output := &DeleteVerifiedAccessInstanceOutput{} + output := &DeleteTransitGatewayPrefixListReferenceOutput{} out.Result = output var buff [1024]byte @@ -20427,7 +20133,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessInstance) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessInstanceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayPrefixListReferenceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20441,7 +20147,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessInstance) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20474,14 +20180,14 @@ func awsEc2query_deserializeOpErrorDeleteVerifiedAccessInstance(response *smithy } } -type awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider struct { +type awsEc2query_deserializeOpDeleteTransitGatewayRoute struct { } -func (*awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20499,9 +20205,9 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessTrustProvider(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRoute(response, &metadata) } - output := &DeleteVerifiedAccessTrustProviderOutput{} + output := &DeleteTransitGatewayRouteOutput{} out.Result = output var buff [1024]byte @@ -20522,7 +20228,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessTrustProviderOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20536,7 +20242,7 @@ func (m *awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) HandleDeser return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDeleteVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20569,14 +20275,14 @@ func awsEc2query_deserializeOpErrorDeleteVerifiedAccessTrustProvider(response *s } } -type awsEc2query_deserializeOpDeleteVolume struct { +type awsEc2query_deserializeOpDeleteTransitGatewayRouteTable struct { } -func (*awsEc2query_deserializeOpDeleteVolume) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20594,21 +20300,44 @@ func (m *awsEc2query_deserializeOpDeleteVolume) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVolume(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTable(response, &metadata) } - output := &DeleteVolumeOutput{} + output := &DeleteTransitGatewayRouteTableOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteTableOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20641,14 +20370,14 @@ func awsEc2query_deserializeOpErrorDeleteVolume(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDeleteVpc struct { +type awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement struct { } -func 
(*awsEc2query_deserializeOpDeleteVpc) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayRouteTableAnnouncement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20666,21 +20395,44 @@ func (m *awsEc2query_deserializeOpDeleteVpc) HandleDeserialize(ctx context.Conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTableAnnouncement(response, &metadata) } - output := &DeleteVpcOutput{} + output := &DeleteTransitGatewayRouteTableAnnouncementOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayRouteTableAnnouncementOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayRouteTableAnnouncement(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20713,14 +20465,14 @@ func awsEc2query_deserializeOpErrorDeleteVpc(response *smithyhttp.Response, meta } } -type awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion struct { +type awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment struct { } -func (*awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) ID() string { +func (*awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, 
) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20738,9 +20490,9 @@ func (m *awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcBlockPublicAccessExclusion(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteTransitGatewayVpcAttachment(response, &metadata) } - output := &DeleteVpcBlockPublicAccessExclusionOutput{} + output := &DeleteTransitGatewayVpcAttachmentOutput{} out.Result = output var buff [1024]byte @@ -20761,7 +20513,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVpcBlockPublicAccessExclusionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteTransitGatewayVpcAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20775,7 +20527,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20808,14 +20560,14 @@ func awsEc2query_deserializeOpErrorDeleteVpcBlockPublicAccessExclusion(response } } -type awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications struct { +type awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint struct { } -func (*awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) ID() string { +func (*awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20833,9 +20585,9 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpointConnectionNotifications(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessEndpoint(response, &metadata) } - output := &DeleteVpcEndpointConnectionNotificationsOutput{} + output := &DeleteVerifiedAccessEndpointOutput{} out.Result = output var buff [1024]byte @@ -20856,7 +20608,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointConnectionNotificationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer 
io.Copy(&snapshot, ringBuffer) @@ -20870,7 +20622,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpcEndpointConnectionNotifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20903,14 +20655,14 @@ func awsEc2query_deserializeOpErrorDeleteVpcEndpointConnectionNotifications(resp } } -type awsEc2query_deserializeOpDeleteVpcEndpoints struct { +type awsEc2query_deserializeOpDeleteVerifiedAccessGroup struct { } -func (*awsEc2query_deserializeOpDeleteVpcEndpoints) ID() string { +func (*awsEc2query_deserializeOpDeleteVerifiedAccessGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpcEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20928,9 +20680,9 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpoints) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessGroup(response, &metadata) } - output := &DeleteVpcEndpointsOutput{} + output := &DeleteVerifiedAccessGroupOutput{} out.Result = output var buff [1024]byte @@ -20951,7 +20703,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpoints) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20965,7 +20717,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpoints) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpcEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20998,14 +20750,14 @@ func awsEc2query_deserializeOpErrorDeleteVpcEndpoints(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations struct { +type awsEc2query_deserializeOpDeleteVerifiedAccessInstance struct { } -func (*awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) ID() string { +func (*awsEc2query_deserializeOpDeleteVerifiedAccessInstance) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21023,9 +20775,9 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) Handle } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpointServiceConfigurations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessInstance(response, &metadata) } - output := &DeleteVpcEndpointServiceConfigurationsOutput{} + output := &DeleteVerifiedAccessInstanceOutput{} out.Result = output var buff [1024]byte @@ -21046,7 +20798,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) Handle } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointServiceConfigurationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessInstanceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21060,7 +20812,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) Handle return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpcEndpointServiceConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21093,14 +20845,14 @@ func awsEc2query_deserializeOpErrorDeleteVpcEndpointServiceConfigurations(respon } } -type awsEc2query_deserializeOpDeleteVpcPeeringConnection struct { +type awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider struct { } -func (*awsEc2query_deserializeOpDeleteVpcPeeringConnection) ID() string { +func (*awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21118,9 +20870,9 @@ func (m *awsEc2query_deserializeOpDeleteVpcPeeringConnection) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcPeeringConnection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVerifiedAccessTrustProvider(response, &metadata) } - output := &DeleteVpcPeeringConnectionOutput{} + output := &DeleteVerifiedAccessTrustProviderOutput{} out.Result = output var buff [1024]byte @@ -21141,7 +20893,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcPeeringConnection) HandleDeserialize( } 
decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeleteVpcPeeringConnectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVerifiedAccessTrustProviderOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21155,79 +20907,7 @@ func (m *awsEc2query_deserializeOpDeleteVpcPeeringConnection) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpDeleteVpnConnection struct { -} - -func (*awsEc2query_deserializeOpDeleteVpnConnection) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpDeleteVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnConnection(response, &metadata) - } - output := &DeleteVpnConnectionOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorDeleteVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21260,14 +20940,14 @@ func awsEc2query_deserializeOpErrorDeleteVpnConnection(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDeleteVpnConnectionRoute struct { +type awsEc2query_deserializeOpDeleteVolume struct { } -func (*awsEc2query_deserializeOpDeleteVpnConnectionRoute) ID() string { +func 
(*awsEc2query_deserializeOpDeleteVolume) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpnConnectionRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21285,9 +20965,9 @@ func (m *awsEc2query_deserializeOpDeleteVpnConnectionRoute) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnConnectionRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVolume(response, &metadata) } - output := &DeleteVpnConnectionRouteOutput{} + output := &DeleteVolumeOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -21299,7 +20979,7 @@ func (m *awsEc2query_deserializeOpDeleteVpnConnectionRoute) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpnConnectionRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21332,14 +21012,14 @@ func awsEc2query_deserializeOpErrorDeleteVpnConnectionRoute(response *smithyhttp } } -type awsEc2query_deserializeOpDeleteVpnGateway struct { +type awsEc2query_deserializeOpDeleteVpc struct { } -func (*awsEc2query_deserializeOpDeleteVpnGateway) ID() string { +func (*awsEc2query_deserializeOpDeleteVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeleteVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21357,9 +21037,9 @@ func (m *awsEc2query_deserializeOpDeleteVpnGateway) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpc(response, &metadata) } - output := &DeleteVpnGatewayOutput{} + output := &DeleteVpcOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -21371,7 +21051,7 @@ func (m *awsEc2query_deserializeOpDeleteVpnGateway) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDeleteVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21404,14 
+21084,14 @@ func awsEc2query_deserializeOpErrorDeleteVpnGateway(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDeprovisionByoipCidr struct { +type awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion struct { } -func (*awsEc2query_deserializeOpDeprovisionByoipCidr) ID() string { +func (*awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeprovisionByoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21429,9 +21109,9 @@ func (m *awsEc2query_deserializeOpDeprovisionByoipCidr) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeprovisionByoipCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcBlockPublicAccessExclusion(response, &metadata) } - output := &DeprovisionByoipCidrOutput{} + output := &DeleteVpcBlockPublicAccessExclusionOutput{} out.Result = output var buff [1024]byte @@ -21452,7 +21132,7 @@ func (m *awsEc2query_deserializeOpDeprovisionByoipCidr) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeprovisionByoipCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVpcBlockPublicAccessExclusionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21466,7 +21146,7 @@ func (m *awsEc2query_deserializeOpDeprovisionByoipCidr) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDeprovisionByoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21499,14 +21179,14 @@ func awsEc2query_deserializeOpErrorDeprovisionByoipCidr(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDeprovisionIpamByoasn struct { +type awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications struct { } -func (*awsEc2query_deserializeOpDeprovisionIpamByoasn) ID() string { +func (*awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeprovisionIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpcEndpointConnectionNotifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21524,9 +21204,9 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamByoasn) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, 
metadata, awsEc2query_deserializeOpErrorDeprovisionIpamByoasn(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpointConnectionNotifications(response, &metadata) } - output := &DeprovisionIpamByoasnOutput{} + output := &DeleteVpcEndpointConnectionNotificationsOutput{} out.Result = output var buff [1024]byte @@ -21547,7 +21227,7 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamByoasn) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeprovisionIpamByoasnOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointConnectionNotificationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21561,7 +21241,7 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamByoasn) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDeprovisionIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpcEndpointConnectionNotifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21594,14 +21274,14 @@ func awsEc2query_deserializeOpErrorDeprovisionIpamByoasn(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDeprovisionIpamPoolCidr struct { +type awsEc2query_deserializeOpDeleteVpcEndpoints struct { } -func (*awsEc2query_deserializeOpDeprovisionIpamPoolCidr) ID() string { +func (*awsEc2query_deserializeOpDeleteVpcEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeprovisionIpamPoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpcEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21619,9 +21299,9 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamPoolCidr) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeprovisionIpamPoolCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpoints(response, &metadata) } - output := &DeprovisionIpamPoolCidrOutput{} + output := &DeleteVpcEndpointsOutput{} out.Result = output var buff [1024]byte @@ -21642,7 +21322,7 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamPoolCidr) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeprovisionIpamPoolCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21656,7 +21336,7 @@ func (m *awsEc2query_deserializeOpDeprovisionIpamPoolCidr) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDeprovisionIpamPoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpcEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer 
bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21689,14 +21369,14 @@ func awsEc2query_deserializeOpErrorDeprovisionIpamPoolCidr(response *smithyhttp. } } -type awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr struct { +type awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations struct { } -func (*awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) ID() string { +func (*awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpcEndpointServiceConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21714,9 +21394,9 @@ func (m *awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeprovisionPublicIpv4PoolCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcEndpointServiceConfigurations(response, &metadata) } - output := &DeprovisionPublicIpv4PoolCidrOutput{} + output := &DeleteVpcEndpointServiceConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -21737,7 +21417,7 @@ func (m *awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeprovisionPublicIpv4PoolCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVpcEndpointServiceConfigurationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21751,7 +21431,7 @@ func (m *awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDeprovisionPublicIpv4PoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpcEndpointServiceConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21784,14 +21464,14 @@ func awsEc2query_deserializeOpErrorDeprovisionPublicIpv4PoolCidr(response *smith } } -type awsEc2query_deserializeOpDeregisterImage struct { +type awsEc2query_deserializeOpDeleteVpcPeeringConnection struct { } -func (*awsEc2query_deserializeOpDeregisterImage) ID() string { +func (*awsEc2query_deserializeOpDeleteVpcPeeringConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -21809,9 +21489,9 @@ func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeregisterImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpcPeeringConnection(response, &metadata) } - output := &DeregisterImageOutput{} + output := &DeleteVpcPeeringConnectionOutput{} out.Result = output var buff [1024]byte @@ -21832,7 +21512,7 @@ func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeregisterImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeleteVpcPeeringConnectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21846,7 +21526,7 @@ func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDeregisterImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21879,14 +21559,14 @@ func awsEc2query_deserializeOpErrorDeregisterImage(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes struct { +type awsEc2query_deserializeOpDeleteVpnConnection struct { } -func (*awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes) ID() string { +func (*awsEc2query_deserializeOpDeleteVpnConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21904,44 +21584,21 @@ func (m *awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeregisterInstanceEventNotificationAttributes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnConnection(response, &metadata) } - output := &DeregisterInstanceEventNotificationAttributesOutput{} + output := &DeleteVpnConnectionOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeregisterInstanceEventNotificationAttributesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeregisterInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21974,14 +21631,14 @@ func awsEc2query_deserializeOpErrorDeregisterInstanceEventNotificationAttributes } } -type awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers struct { +type awsEc2query_deserializeOpDeleteVpnConnectionRoute struct { } -func (*awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers) ID() string { +func (*awsEc2query_deserializeOpDeleteVpnConnectionRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpnConnectionRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21999,44 +21656,21 @@ func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupMembers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnConnectionRoute(response, &metadata) } - output := &DeregisterTransitGatewayMulticastGroupMembersOutput{} + output := &DeleteVpnConnectionRouteOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeregisterTransitGatewayMulticastGroupMembersOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupMembers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpnConnectionRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22069,14 +21703,14 @@ func awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupMembers } } -type awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources struct { +type awsEc2query_deserializeOpDeleteVpnGateway struct { } -func (*awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources) ID() string { +func (*awsEc2query_deserializeOpDeleteVpnGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeleteVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22094,44 +21728,21 @@ func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupSources(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeleteVpnGateway(response, &metadata) } - output := &DeregisterTransitGatewayMulticastGroupSourcesOutput{} + output := &DeleteVpnGatewayOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDeregisterTransitGatewayMulticastGroupSourcesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeleteVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22164,14 +21775,14 @@ func awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupSources } } 
-type awsEc2query_deserializeOpDescribeAccountAttributes struct { +type awsEc2query_deserializeOpDeprovisionByoipCidr struct { } -func (*awsEc2query_deserializeOpDescribeAccountAttributes) ID() string { +func (*awsEc2query_deserializeOpDeprovisionByoipCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAccountAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeprovisionByoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22189,9 +21800,9 @@ func (m *awsEc2query_deserializeOpDescribeAccountAttributes) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAccountAttributes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeprovisionByoipCidr(response, &metadata) } - output := &DescribeAccountAttributesOutput{} + output := &DeprovisionByoipCidrOutput{} out.Result = output var buff [1024]byte @@ -22212,7 +21823,7 @@ func (m *awsEc2query_deserializeOpDescribeAccountAttributes) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAccountAttributesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeprovisionByoipCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22226,7 +21837,7 @@ func (m *awsEc2query_deserializeOpDescribeAccountAttributes) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAccountAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeprovisionByoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22259,14 +21870,14 @@ func awsEc2query_deserializeOpErrorDescribeAccountAttributes(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeAddresses struct { +type awsEc2query_deserializeOpDeprovisionIpamByoasn struct { } -func (*awsEc2query_deserializeOpDescribeAddresses) ID() string { +func (*awsEc2query_deserializeOpDeprovisionIpamByoasn) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeprovisionIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22284,9 +21895,9 @@ func (m *awsEc2query_deserializeOpDescribeAddresses) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAddresses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeprovisionIpamByoasn(response, &metadata) } - output := &DescribeAddressesOutput{} + output := 
&DeprovisionIpamByoasnOutput{} out.Result = output var buff [1024]byte @@ -22307,7 +21918,7 @@ func (m *awsEc2query_deserializeOpDescribeAddresses) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAddressesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeprovisionIpamByoasnOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22321,7 +21932,7 @@ func (m *awsEc2query_deserializeOpDescribeAddresses) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeprovisionIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22354,14 +21965,14 @@ func awsEc2query_deserializeOpErrorDescribeAddresses(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeAddressesAttribute struct { +type awsEc2query_deserializeOpDeprovisionIpamPoolCidr struct { } -func (*awsEc2query_deserializeOpDescribeAddressesAttribute) ID() string { +func (*awsEc2query_deserializeOpDeprovisionIpamPoolCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAddressesAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeprovisionIpamPoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22379,9 +21990,9 @@ func (m *awsEc2query_deserializeOpDescribeAddressesAttribute) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAddressesAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeprovisionIpamPoolCidr(response, &metadata) } - output := &DescribeAddressesAttributeOutput{} + output := &DeprovisionIpamPoolCidrOutput{} out.Result = output var buff [1024]byte @@ -22402,7 +22013,7 @@ func (m *awsEc2query_deserializeOpDescribeAddressesAttribute) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAddressesAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeprovisionIpamPoolCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22416,7 +22027,7 @@ func (m *awsEc2query_deserializeOpDescribeAddressesAttribute) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAddressesAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeprovisionIpamPoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22449,14 +22060,14 @@ func awsEc2query_deserializeOpErrorDescribeAddressesAttribute(response 
*smithyht } } -type awsEc2query_deserializeOpDescribeAddressTransfers struct { +type awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr struct { } -func (*awsEc2query_deserializeOpDescribeAddressTransfers) ID() string { +func (*awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAddressTransfers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeprovisionPublicIpv4PoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22474,9 +22085,9 @@ func (m *awsEc2query_deserializeOpDescribeAddressTransfers) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAddressTransfers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeprovisionPublicIpv4PoolCidr(response, &metadata) } - output := &DescribeAddressTransfersOutput{} + output := &DeprovisionPublicIpv4PoolCidrOutput{} out.Result = output var buff [1024]byte @@ -22497,7 +22108,7 @@ func (m *awsEc2query_deserializeOpDescribeAddressTransfers) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAddressTransfersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeprovisionPublicIpv4PoolCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22511,7 +22122,7 @@ func (m *awsEc2query_deserializeOpDescribeAddressTransfers) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAddressTransfers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeprovisionPublicIpv4PoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22544,14 +22155,14 @@ func awsEc2query_deserializeOpErrorDescribeAddressTransfers(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeAggregateIdFormat struct { +type awsEc2query_deserializeOpDeregisterImage struct { } -func (*awsEc2query_deserializeOpDescribeAggregateIdFormat) ID() string { +func (*awsEc2query_deserializeOpDeregisterImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAggregateIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22569,9 +22180,9 @@ func (m *awsEc2query_deserializeOpDescribeAggregateIdFormat) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAggregateIdFormat(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeregisterImage(response, 
&metadata) } - output := &DescribeAggregateIdFormatOutput{} + output := &DeregisterImageOutput{} out.Result = output var buff [1024]byte @@ -22592,7 +22203,7 @@ func (m *awsEc2query_deserializeOpDescribeAggregateIdFormat) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAggregateIdFormatOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeregisterImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22606,7 +22217,7 @@ func (m *awsEc2query_deserializeOpDescribeAggregateIdFormat) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAggregateIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeregisterImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22639,14 +22250,14 @@ func awsEc2query_deserializeOpErrorDescribeAggregateIdFormat(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeAvailabilityZones struct { +type awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes struct { } -func (*awsEc2query_deserializeOpDescribeAvailabilityZones) ID() string { +func (*awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAvailabilityZones) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeregisterInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22664,9 +22275,9 @@ func (m *awsEc2query_deserializeOpDescribeAvailabilityZones) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAvailabilityZones(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeregisterInstanceEventNotificationAttributes(response, &metadata) } - output := &DescribeAvailabilityZonesOutput{} + output := &DeregisterInstanceEventNotificationAttributesOutput{} out.Result = output var buff [1024]byte @@ -22687,7 +22298,7 @@ func (m *awsEc2query_deserializeOpDescribeAvailabilityZones) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAvailabilityZonesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeregisterInstanceEventNotificationAttributesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22701,7 +22312,7 @@ func (m *awsEc2query_deserializeOpDescribeAvailabilityZones) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAvailabilityZones(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeregisterInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22734,14 +22345,14 @@ func awsEc2query_deserializeOpErrorDescribeAvailabilityZones(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions struct { +type awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers struct { } -func (*awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions) ID() string { +func (*awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupMembers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22759,9 +22370,9 @@ func (m *awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptio } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeAwsNetworkPerformanceMetricSubscriptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupMembers(response, &metadata) } - output := &DescribeAwsNetworkPerformanceMetricSubscriptionsOutput{} + output := &DeregisterTransitGatewayMulticastGroupMembersOutput{} out.Result = output var buff [1024]byte @@ -22782,7 +22393,7 @@ func (m *awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptio } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeAwsNetworkPerformanceMetricSubscriptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeregisterTransitGatewayMulticastGroupMembersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22796,7 +22407,7 @@ func (m *awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptio return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeAwsNetworkPerformanceMetricSubscriptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupMembers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22829,14 +22440,14 @@ func awsEc2query_deserializeOpErrorDescribeAwsNetworkPerformanceMetricSubscripti } } -type awsEc2query_deserializeOpDescribeBundleTasks struct { +type awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources struct { } -func (*awsEc2query_deserializeOpDescribeBundleTasks) ID() string { +func (*awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeBundleTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDeregisterTransitGatewayMulticastGroupSources) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22854,9 +22465,9 @@ func (m *awsEc2query_deserializeOpDescribeBundleTasks) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeBundleTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupSources(response, &metadata) } - output := &DescribeBundleTasksOutput{} + output := &DeregisterTransitGatewayMulticastGroupSourcesOutput{} out.Result = output var buff [1024]byte @@ -22877,7 +22488,7 @@ func (m *awsEc2query_deserializeOpDescribeBundleTasks) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeBundleTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDeregisterTransitGatewayMulticastGroupSourcesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22891,7 +22502,7 @@ func (m *awsEc2query_deserializeOpDescribeBundleTasks) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeBundleTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDeregisterTransitGatewayMulticastGroupSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -22924,14 +22535,14 @@ func awsEc2query_deserializeOpErrorDescribeBundleTasks(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeByoipCidrs struct { +type awsEc2query_deserializeOpDescribeAccountAttributes struct { } -func (*awsEc2query_deserializeOpDescribeByoipCidrs) ID() string { +func (*awsEc2query_deserializeOpDescribeAccountAttributes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeByoipCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAccountAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -22949,9 +22560,9 @@ func (m *awsEc2query_deserializeOpDescribeByoipCidrs) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeByoipCidrs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAccountAttributes(response, &metadata) } - output := &DescribeByoipCidrsOutput{} + output := &DescribeAccountAttributesOutput{} out.Result = output var buff [1024]byte @@ -22972,7 +22583,7 @@ func (m *awsEc2query_deserializeOpDescribeByoipCidrs) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeByoipCidrsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAccountAttributesOutput(&output, decoder) if err != nil { var snapshot 
bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22986,7 +22597,7 @@ func (m *awsEc2query_deserializeOpDescribeByoipCidrs) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeByoipCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAccountAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23019,14 +22630,14 @@ func awsEc2query_deserializeOpErrorDescribeByoipCidrs(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory struct { +type awsEc2query_deserializeOpDescribeAddresses struct { } -func (*awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) ID() string { +func (*awsEc2query_deserializeOpDescribeAddresses) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23044,9 +22655,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionHistory(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAddresses(response, &metadata) } - output := &DescribeCapacityBlockExtensionHistoryOutput{} + output := &DescribeAddressesOutput{} out.Result = output var buff [1024]byte @@ -23067,7 +22678,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockExtensionHistoryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAddressesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23081,7 +22692,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23114,14 +22725,14 @@ func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionHistory(respons } } -type awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings struct { +type awsEc2query_deserializeOpDescribeAddressesAttribute struct { } -func (*awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) ID() string { +func (*awsEc2query_deserializeOpDescribeAddressesAttribute) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAddressesAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23139,9 +22750,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionOfferings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAddressesAttribute(response, &metadata) } - output := &DescribeCapacityBlockExtensionOfferingsOutput{} + output := &DescribeAddressesAttributeOutput{} out.Result = output var buff [1024]byte @@ -23162,7 +22773,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockExtensionOfferingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAddressesAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23176,7 +22787,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAddressesAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23209,14 +22820,14 @@ func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionOfferings(respo } } -type awsEc2query_deserializeOpDescribeCapacityBlockOfferings struct { +type awsEc2query_deserializeOpDescribeAddressTransfers struct { } -func (*awsEc2query_deserializeOpDescribeCapacityBlockOfferings) ID() string { +func (*awsEc2query_deserializeOpDescribeAddressTransfers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCapacityBlockOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAddressTransfers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23234,9 +22845,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockOfferings) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockOfferings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAddressTransfers(response, &metadata) } - output := &DescribeCapacityBlockOfferingsOutput{} + output := &DescribeAddressTransfersOutput{} out.Result = output var buff [1024]byte @@ -23257,7 +22868,7 @@ func (m 
*awsEc2query_deserializeOpDescribeCapacityBlockOfferings) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockOfferingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAddressTransfersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23271,7 +22882,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityBlockOfferings) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityBlockOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAddressTransfers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23304,14 +22915,14 @@ func awsEc2query_deserializeOpErrorDescribeCapacityBlockOfferings(response *smit } } -type awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests struct { +type awsEc2query_deserializeOpDescribeAggregateIdFormat struct { } -func (*awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) ID() string { +func (*awsEc2query_deserializeOpDescribeAggregateIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAggregateIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23329,9 +22940,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservationBillingRequests(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAggregateIdFormat(response, &metadata) } - output := &DescribeCapacityReservationBillingRequestsOutput{} + output := &DescribeAggregateIdFormatOutput{} out.Result = output var buff [1024]byte @@ -23352,7 +22963,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationBillingRequestsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAggregateIdFormatOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23366,7 +22977,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityReservationBillingRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAggregateIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23399,14 +23010,14 @@ func 
awsEc2query_deserializeOpErrorDescribeCapacityReservationBillingRequests(re } } -type awsEc2query_deserializeOpDescribeCapacityReservationFleets struct { +type awsEc2query_deserializeOpDescribeAvailabilityZones struct { } -func (*awsEc2query_deserializeOpDescribeCapacityReservationFleets) ID() string { +func (*awsEc2query_deserializeOpDescribeAvailabilityZones) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCapacityReservationFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAvailabilityZones) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23424,9 +23035,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationFleets) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservationFleets(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAvailabilityZones(response, &metadata) } - output := &DescribeCapacityReservationFleetsOutput{} + output := &DescribeAvailabilityZonesOutput{} out.Result = output var buff [1024]byte @@ -23447,7 +23058,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationFleets) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationFleetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAvailabilityZonesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23461,7 +23072,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservationFleets) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityReservationFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAvailabilityZones(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23494,14 +23105,14 @@ func awsEc2query_deserializeOpErrorDescribeCapacityReservationFleets(response *s } } -type awsEc2query_deserializeOpDescribeCapacityReservations struct { +type awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions struct { } -func (*awsEc2query_deserializeOpDescribeCapacityReservations) ID() string { +func (*awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCapacityReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeAwsNetworkPerformanceMetricSubscriptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23519,9 +23130,9 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservations) HandleDeserializ } if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeAwsNetworkPerformanceMetricSubscriptions(response, &metadata) } - output := &DescribeCapacityReservationsOutput{} + output := &DescribeAwsNetworkPerformanceMetricSubscriptionsOutput{} out.Result = output var buff [1024]byte @@ -23542,7 +23153,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservations) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeAwsNetworkPerformanceMetricSubscriptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23556,7 +23167,7 @@ func (m *awsEc2query_deserializeOpDescribeCapacityReservations) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCapacityReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeAwsNetworkPerformanceMetricSubscriptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23589,14 +23200,14 @@ func awsEc2query_deserializeOpErrorDescribeCapacityReservations(response *smithy } } -type awsEc2query_deserializeOpDescribeCarrierGateways struct { +type awsEc2query_deserializeOpDescribeBundleTasks struct { } -func (*awsEc2query_deserializeOpDescribeCarrierGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeBundleTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCarrierGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeBundleTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23614,9 +23225,9 @@ func (m *awsEc2query_deserializeOpDescribeCarrierGateways) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCarrierGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeBundleTasks(response, &metadata) } - output := &DescribeCarrierGatewaysOutput{} + output := &DescribeBundleTasksOutput{} out.Result = output var buff [1024]byte @@ -23637,7 +23248,7 @@ func (m *awsEc2query_deserializeOpDescribeCarrierGateways) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCarrierGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeBundleTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23651,7 +23262,7 @@ func (m *awsEc2query_deserializeOpDescribeCarrierGateways) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCarrierGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDescribeBundleTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23684,14 +23295,14 @@ func awsEc2query_deserializeOpErrorDescribeCarrierGateways(response *smithyhttp. } } -type awsEc2query_deserializeOpDescribeClassicLinkInstances struct { +type awsEc2query_deserializeOpDescribeByoipCidrs struct { } -func (*awsEc2query_deserializeOpDescribeClassicLinkInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeByoipCidrs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClassicLinkInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeByoipCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23709,9 +23320,9 @@ func (m *awsEc2query_deserializeOpDescribeClassicLinkInstances) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClassicLinkInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeByoipCidrs(response, &metadata) } - output := &DescribeClassicLinkInstancesOutput{} + output := &DescribeByoipCidrsOutput{} out.Result = output var buff [1024]byte @@ -23732,7 +23343,7 @@ func (m *awsEc2query_deserializeOpDescribeClassicLinkInstances) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClassicLinkInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeByoipCidrsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23746,7 +23357,7 @@ func (m *awsEc2query_deserializeOpDescribeClassicLinkInstances) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClassicLinkInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeByoipCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23779,14 +23390,14 @@ func awsEc2query_deserializeOpErrorDescribeClassicLinkInstances(response *smithy } } -type awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules struct { +type awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory struct { } -func (*awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23804,9 +23415,9 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnAuthorizationRules(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionHistory(response, &metadata) } - output := &DescribeClientVpnAuthorizationRulesOutput{} + output := &DescribeCapacityBlockExtensionHistoryOutput{} out.Result = output var buff [1024]byte @@ -23827,7 +23438,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClientVpnAuthorizationRulesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockExtensionHistoryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23841,7 +23452,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClientVpnAuthorizationRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23874,14 +23485,14 @@ func awsEc2query_deserializeOpErrorDescribeClientVpnAuthorizationRules(response } } -type awsEc2query_deserializeOpDescribeClientVpnConnections struct { +type awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings struct { } -func (*awsEc2query_deserializeOpDescribeClientVpnConnections) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClientVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityBlockExtensionOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23899,9 +23510,9 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnConnections) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionOfferings(response, &metadata) } - output := &DescribeClientVpnConnectionsOutput{} + output := &DescribeCapacityBlockExtensionOfferingsOutput{} out.Result = output var buff [1024]byte @@ -23922,7 +23533,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnConnections) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClientVpnConnectionsOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDescribeCapacityBlockExtensionOfferingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23936,7 +23547,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnConnections) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClientVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityBlockExtensionOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -23969,14 +23580,14 @@ func awsEc2query_deserializeOpErrorDescribeClientVpnConnections(response *smithy } } -type awsEc2query_deserializeOpDescribeClientVpnEndpoints struct { +type awsEc2query_deserializeOpDescribeCapacityBlockOfferings struct { } -func (*awsEc2query_deserializeOpDescribeClientVpnEndpoints) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityBlockOfferings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClientVpnEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityBlockOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -23994,9 +23605,9 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnEndpoints) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockOfferings(response, &metadata) } - output := &DescribeClientVpnEndpointsOutput{} + output := &DescribeCapacityBlockOfferingsOutput{} out.Result = output var buff [1024]byte @@ -24017,7 +23628,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnEndpoints) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClientVpnEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockOfferingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24031,7 +23642,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnEndpoints) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClientVpnEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityBlockOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24064,14 +23675,14 @@ func awsEc2query_deserializeOpErrorDescribeClientVpnEndpoints(response *smithyht } } -type awsEc2query_deserializeOpDescribeClientVpnRoutes struct { +type awsEc2query_deserializeOpDescribeCapacityBlocks struct { } -func (*awsEc2query_deserializeOpDescribeClientVpnRoutes) ID() string { +func 
(*awsEc2query_deserializeOpDescribeCapacityBlocks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClientVpnRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityBlocks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24089,9 +23700,9 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnRoutes) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnRoutes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlocks(response, &metadata) } - output := &DescribeClientVpnRoutesOutput{} + output := &DescribeCapacityBlocksOutput{} out.Result = output var buff [1024]byte @@ -24112,7 +23723,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnRoutes) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClientVpnRoutesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityBlocksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24126,7 +23737,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnRoutes) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClientVpnRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityBlocks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24159,14 +23770,14 @@ func awsEc2query_deserializeOpErrorDescribeClientVpnRoutes(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribeClientVpnTargetNetworks struct { +type awsEc2query_deserializeOpDescribeCapacityBlockStatus struct { } -func (*awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityBlockStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityBlockStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24184,9 +23795,9 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnTargetNetworks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityBlockStatus(response, &metadata) } - output := &DescribeClientVpnTargetNetworksOutput{} + output := &DescribeCapacityBlockStatusOutput{} out.Result = output var buff [1024]byte @@ -24207,7 +23818,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeClientVpnTargetNetworksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityBlockStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24221,7 +23832,7 @@ func (m *awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeClientVpnTargetNetworks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityBlockStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24254,14 +23865,14 @@ func awsEc2query_deserializeOpErrorDescribeClientVpnTargetNetworks(response *smi } } -type awsEc2query_deserializeOpDescribeCoipPools struct { +type awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests struct { } -func (*awsEc2query_deserializeOpDescribeCoipPools) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCoipPools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityReservationBillingRequests) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24279,9 +23890,9 @@ func (m *awsEc2query_deserializeOpDescribeCoipPools) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCoipPools(response, &metadata) + 
return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservationBillingRequests(response, &metadata) } - output := &DescribeCoipPoolsOutput{} + output := &DescribeCapacityReservationBillingRequestsOutput{} out.Result = output var buff [1024]byte @@ -24302,7 +23913,7 @@ func (m *awsEc2query_deserializeOpDescribeCoipPools) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCoipPoolsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationBillingRequestsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24316,7 +23927,7 @@ func (m *awsEc2query_deserializeOpDescribeCoipPools) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCoipPools(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityReservationBillingRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24349,14 +23960,14 @@ func awsEc2query_deserializeOpErrorDescribeCoipPools(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeConversionTasks struct { +type awsEc2query_deserializeOpDescribeCapacityReservationFleets struct { } -func (*awsEc2query_deserializeOpDescribeConversionTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityReservationFleets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeConversionTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityReservationFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24374,9 +23985,9 @@ func (m *awsEc2query_deserializeOpDescribeConversionTasks) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeConversionTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservationFleets(response, &metadata) } - output := &DescribeConversionTasksOutput{} + output := &DescribeCapacityReservationFleetsOutput{} out.Result = output var buff [1024]byte @@ -24397,7 +24008,7 @@ func (m *awsEc2query_deserializeOpDescribeConversionTasks) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeConversionTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationFleetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24411,7 +24022,7 @@ func (m *awsEc2query_deserializeOpDescribeConversionTasks) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeConversionTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityReservationFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var 
errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24444,14 +24055,14 @@ func awsEc2query_deserializeOpErrorDescribeConversionTasks(response *smithyhttp. } } -type awsEc2query_deserializeOpDescribeCustomerGateways struct { +type awsEc2query_deserializeOpDescribeCapacityReservations struct { } -func (*awsEc2query_deserializeOpDescribeCustomerGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeCapacityReservations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeCustomerGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCapacityReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24469,9 +24080,9 @@ func (m *awsEc2query_deserializeOpDescribeCustomerGateways) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeCustomerGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCapacityReservations(response, &metadata) } - output := &DescribeCustomerGatewaysOutput{} + output := &DescribeCapacityReservationsOutput{} out.Result = output var buff [1024]byte @@ -24492,7 +24103,7 @@ func (m *awsEc2query_deserializeOpDescribeCustomerGateways) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeCustomerGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCapacityReservationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24506,7 +24117,7 @@ func (m *awsEc2query_deserializeOpDescribeCustomerGateways) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeCustomerGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCapacityReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24539,14 +24150,14 @@ func awsEc2query_deserializeOpErrorDescribeCustomerGateways(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeDeclarativePoliciesReports struct { +type awsEc2query_deserializeOpDescribeCarrierGateways struct { } -func (*awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) ID() string { +func (*awsEc2query_deserializeOpDescribeCarrierGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCarrierGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24564,9 
+24175,9 @@ func (m *awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeDeclarativePoliciesReports(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCarrierGateways(response, &metadata) } - output := &DescribeDeclarativePoliciesReportsOutput{} + output := &DescribeCarrierGatewaysOutput{} out.Result = output var buff [1024]byte @@ -24587,7 +24198,7 @@ func (m *awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeDeclarativePoliciesReportsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCarrierGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24601,7 +24212,7 @@ func (m *awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeDeclarativePoliciesReports(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCarrierGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24634,14 +24245,14 @@ func awsEc2query_deserializeOpErrorDescribeDeclarativePoliciesReports(response * } } -type awsEc2query_deserializeOpDescribeDhcpOptions struct { +type awsEc2query_deserializeOpDescribeClassicLinkInstances struct { } -func (*awsEc2query_deserializeOpDescribeDhcpOptions) ID() string { +func (*awsEc2query_deserializeOpDescribeClassicLinkInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeDhcpOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeClassicLinkInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24659,9 +24270,9 @@ func (m *awsEc2query_deserializeOpDescribeDhcpOptions) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeDhcpOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClassicLinkInstances(response, &metadata) } - output := &DescribeDhcpOptionsOutput{} + output := &DescribeClassicLinkInstancesOutput{} out.Result = output var buff [1024]byte @@ -24682,7 +24293,7 @@ func (m *awsEc2query_deserializeOpDescribeDhcpOptions) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeDhcpOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClassicLinkInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24696,7 +24307,7 @@ func (m *awsEc2query_deserializeOpDescribeDhcpOptions) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeDhcpOptions(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClassicLinkInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24729,14 +24340,14 @@ func awsEc2query_deserializeOpErrorDescribeDhcpOptions(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways struct { +type awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules struct { } -func (*awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeClientVpnAuthorizationRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24754,9 +24365,9 @@ func (m *awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeEgressOnlyInternetGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnAuthorizationRules(response, &metadata) } - output := &DescribeEgressOnlyInternetGatewaysOutput{} + output := &DescribeClientVpnAuthorizationRulesOutput{} out.Result = output var buff [1024]byte @@ -24777,7 +24388,7 @@ func (m *awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeEgressOnlyInternetGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClientVpnAuthorizationRulesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24791,7 +24402,7 @@ func (m *awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeEgressOnlyInternetGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClientVpnAuthorizationRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24824,14 +24435,14 @@ func awsEc2query_deserializeOpErrorDescribeEgressOnlyInternetGateways(response * } } -type awsEc2query_deserializeOpDescribeElasticGpus struct { +type awsEc2query_deserializeOpDescribeClientVpnConnections struct { } -func (*awsEc2query_deserializeOpDescribeElasticGpus) ID() string { +func (*awsEc2query_deserializeOpDescribeClientVpnConnections) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeElasticGpus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpDescribeClientVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24849,9 +24460,9 @@ func (m *awsEc2query_deserializeOpDescribeElasticGpus) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeElasticGpus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnConnections(response, &metadata) } - output := &DescribeElasticGpusOutput{} + output := &DescribeClientVpnConnectionsOutput{} out.Result = output var buff [1024]byte @@ -24872,7 +24483,7 @@ func (m *awsEc2query_deserializeOpDescribeElasticGpus) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeElasticGpusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClientVpnConnectionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24886,7 +24497,7 @@ func (m *awsEc2query_deserializeOpDescribeElasticGpus) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeElasticGpus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClientVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -24919,14 +24530,14 @@ func awsEc2query_deserializeOpErrorDescribeElasticGpus(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeExportImageTasks struct { +type awsEc2query_deserializeOpDescribeClientVpnEndpoints struct { } -func (*awsEc2query_deserializeOpDescribeExportImageTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeClientVpnEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeExportImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeClientVpnEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -24944,9 +24555,9 @@ func (m *awsEc2query_deserializeOpDescribeExportImageTasks) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeExportImageTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnEndpoints(response, &metadata) } - output := &DescribeExportImageTasksOutput{} + output := &DescribeClientVpnEndpointsOutput{} out.Result = output var buff [1024]byte @@ -24967,7 +24578,7 @@ func (m *awsEc2query_deserializeOpDescribeExportImageTasks) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeExportImageTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClientVpnEndpointsOutput(&output, decoder) if err != 
nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -24981,7 +24592,7 @@ func (m *awsEc2query_deserializeOpDescribeExportImageTasks) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeExportImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClientVpnEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25014,14 +24625,14 @@ func awsEc2query_deserializeOpErrorDescribeExportImageTasks(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeExportTasks struct { +type awsEc2query_deserializeOpDescribeClientVpnRoutes struct { } -func (*awsEc2query_deserializeOpDescribeExportTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeClientVpnRoutes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeExportTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeClientVpnRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25039,9 +24650,9 @@ func (m *awsEc2query_deserializeOpDescribeExportTasks) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeExportTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnRoutes(response, &metadata) } - output := &DescribeExportTasksOutput{} + output := &DescribeClientVpnRoutesOutput{} out.Result = output var buff [1024]byte @@ -25062,7 +24673,7 @@ func (m *awsEc2query_deserializeOpDescribeExportTasks) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeExportTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClientVpnRoutesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25076,7 +24687,7 @@ func (m *awsEc2query_deserializeOpDescribeExportTasks) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeExportTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClientVpnRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25109,14 +24720,14 @@ func awsEc2query_deserializeOpErrorDescribeExportTasks(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeFastLaunchImages struct { +type awsEc2query_deserializeOpDescribeClientVpnTargetNetworks struct { } -func (*awsEc2query_deserializeOpDescribeFastLaunchImages) ID() string { +func (*awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFastLaunchImages) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeClientVpnTargetNetworks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25134,9 +24745,9 @@ func (m *awsEc2query_deserializeOpDescribeFastLaunchImages) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFastLaunchImages(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeClientVpnTargetNetworks(response, &metadata) } - output := &DescribeFastLaunchImagesOutput{} + output := &DescribeClientVpnTargetNetworksOutput{} out.Result = output var buff [1024]byte @@ -25157,7 +24768,7 @@ func (m *awsEc2query_deserializeOpDescribeFastLaunchImages) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFastLaunchImagesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeClientVpnTargetNetworksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25171,7 +24782,7 @@ func (m *awsEc2query_deserializeOpDescribeFastLaunchImages) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFastLaunchImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeClientVpnTargetNetworks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25204,14 +24815,14 @@ func awsEc2query_deserializeOpErrorDescribeFastLaunchImages(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeFastSnapshotRestores struct { +type awsEc2query_deserializeOpDescribeCoipPools struct { } -func (*awsEc2query_deserializeOpDescribeFastSnapshotRestores) ID() string { +func (*awsEc2query_deserializeOpDescribeCoipPools) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCoipPools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25229,9 +24840,9 @@ func (m *awsEc2query_deserializeOpDescribeFastSnapshotRestores) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFastSnapshotRestores(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCoipPools(response, &metadata) } - output := &DescribeFastSnapshotRestoresOutput{} + output := &DescribeCoipPoolsOutput{} out.Result = output var buff [1024]byte @@ -25252,7 +24863,7 @@ func (m *awsEc2query_deserializeOpDescribeFastSnapshotRestores) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFastSnapshotRestoresOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDescribeCoipPoolsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25266,7 +24877,7 @@ func (m *awsEc2query_deserializeOpDescribeFastSnapshotRestores) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCoipPools(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25299,14 +24910,14 @@ func awsEc2query_deserializeOpErrorDescribeFastSnapshotRestores(response *smithy } } -type awsEc2query_deserializeOpDescribeFleetHistory struct { +type awsEc2query_deserializeOpDescribeConversionTasks struct { } -func (*awsEc2query_deserializeOpDescribeFleetHistory) ID() string { +func (*awsEc2query_deserializeOpDescribeConversionTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFleetHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeConversionTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25324,9 +24935,9 @@ func (m *awsEc2query_deserializeOpDescribeFleetHistory) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFleetHistory(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeConversionTasks(response, &metadata) } - output := &DescribeFleetHistoryOutput{} + output := &DescribeConversionTasksOutput{} out.Result = output var buff [1024]byte @@ -25347,7 +24958,7 @@ func (m *awsEc2query_deserializeOpDescribeFleetHistory) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFleetHistoryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeConversionTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25361,7 +24972,7 @@ func (m *awsEc2query_deserializeOpDescribeFleetHistory) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFleetHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeConversionTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25394,14 +25005,14 @@ func awsEc2query_deserializeOpErrorDescribeFleetHistory(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDescribeFleetInstances struct { +type awsEc2query_deserializeOpDescribeCustomerGateways struct { } -func (*awsEc2query_deserializeOpDescribeFleetInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeCustomerGateways) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDescribeFleetInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeCustomerGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25419,9 +25030,9 @@ func (m *awsEc2query_deserializeOpDescribeFleetInstances) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFleetInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeCustomerGateways(response, &metadata) } - output := &DescribeFleetInstancesOutput{} + output := &DescribeCustomerGatewaysOutput{} out.Result = output var buff [1024]byte @@ -25442,7 +25053,7 @@ func (m *awsEc2query_deserializeOpDescribeFleetInstances) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFleetInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeCustomerGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25456,7 +25067,7 @@ func (m *awsEc2query_deserializeOpDescribeFleetInstances) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFleetInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeCustomerGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25489,14 +25100,14 @@ func awsEc2query_deserializeOpErrorDescribeFleetInstances(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeFleets struct { +type awsEc2query_deserializeOpDescribeDeclarativePoliciesReports struct { } -func (*awsEc2query_deserializeOpDescribeFleets) ID() string { +func (*awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeDeclarativePoliciesReports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25514,9 +25125,9 @@ func (m *awsEc2query_deserializeOpDescribeFleets) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFleets(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeDeclarativePoliciesReports(response, &metadata) } - output := &DescribeFleetsOutput{} + output := &DescribeDeclarativePoliciesReportsOutput{} out.Result = output var buff [1024]byte @@ -25537,7 +25148,7 @@ func (m *awsEc2query_deserializeOpDescribeFleets) HandleDeserialize(ctx context. 
} decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFleetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeDeclarativePoliciesReportsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25551,7 +25162,7 @@ func (m *awsEc2query_deserializeOpDescribeFleets) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeDeclarativePoliciesReports(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25584,14 +25195,14 @@ func awsEc2query_deserializeOpErrorDescribeFleets(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDescribeFlowLogs struct { +type awsEc2query_deserializeOpDescribeDhcpOptions struct { } -func (*awsEc2query_deserializeOpDescribeFlowLogs) ID() string { +func (*awsEc2query_deserializeOpDescribeDhcpOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeDhcpOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25609,9 +25220,9 @@ func (m *awsEc2query_deserializeOpDescribeFlowLogs) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFlowLogs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeDhcpOptions(response, &metadata) } - output := &DescribeFlowLogsOutput{} + output := &DescribeDhcpOptionsOutput{} out.Result = output var buff [1024]byte @@ -25632,7 +25243,7 @@ func (m *awsEc2query_deserializeOpDescribeFlowLogs) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFlowLogsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeDhcpOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25646,7 +25257,7 @@ func (m *awsEc2query_deserializeOpDescribeFlowLogs) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeDhcpOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25679,14 +25290,14 @@ func awsEc2query_deserializeOpErrorDescribeFlowLogs(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDescribeFpgaImageAttribute struct { +type awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways struct { } -func (*awsEc2query_deserializeOpDescribeFpgaImageAttribute) ID() string { +func 
(*awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeEgressOnlyInternetGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25704,9 +25315,9 @@ func (m *awsEc2query_deserializeOpDescribeFpgaImageAttribute) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFpgaImageAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeEgressOnlyInternetGateways(response, &metadata) } - output := &DescribeFpgaImageAttributeOutput{} + output := &DescribeEgressOnlyInternetGatewaysOutput{} out.Result = output var buff [1024]byte @@ -25727,7 +25338,7 @@ func (m *awsEc2query_deserializeOpDescribeFpgaImageAttribute) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFpgaImageAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeEgressOnlyInternetGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25741,7 +25352,7 @@ func (m *awsEc2query_deserializeOpDescribeFpgaImageAttribute) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeEgressOnlyInternetGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25774,14 +25385,14 @@ func awsEc2query_deserializeOpErrorDescribeFpgaImageAttribute(response *smithyht } } -type awsEc2query_deserializeOpDescribeFpgaImages struct { +type awsEc2query_deserializeOpDescribeElasticGpus struct { } -func (*awsEc2query_deserializeOpDescribeFpgaImages) ID() string { +func (*awsEc2query_deserializeOpDescribeElasticGpus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeFpgaImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeElasticGpus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25799,9 +25410,9 @@ func (m *awsEc2query_deserializeOpDescribeFpgaImages) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeFpgaImages(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeElasticGpus(response, &metadata) } - output := &DescribeFpgaImagesOutput{} + output := &DescribeElasticGpusOutput{} out.Result = output var buff [1024]byte @@ -25822,7 +25433,7 @@ func (m 
*awsEc2query_deserializeOpDescribeFpgaImages) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeFpgaImagesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeElasticGpusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25836,7 +25447,7 @@ func (m *awsEc2query_deserializeOpDescribeFpgaImages) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeFpgaImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeElasticGpus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25869,14 +25480,14 @@ func awsEc2query_deserializeOpErrorDescribeFpgaImages(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpDescribeHostReservationOfferings struct { +type awsEc2query_deserializeOpDescribeExportImageTasks struct { } -func (*awsEc2query_deserializeOpDescribeHostReservationOfferings) ID() string { +func (*awsEc2query_deserializeOpDescribeExportImageTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeHostReservationOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeExportImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25894,9 +25505,9 @@ func (m *awsEc2query_deserializeOpDescribeHostReservationOfferings) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeHostReservationOfferings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeExportImageTasks(response, &metadata) } - output := &DescribeHostReservationOfferingsOutput{} + output := &DescribeExportImageTasksOutput{} out.Result = output var buff [1024]byte @@ -25917,7 +25528,7 @@ func (m *awsEc2query_deserializeOpDescribeHostReservationOfferings) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeHostReservationOfferingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeExportImageTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -25931,7 +25542,7 @@ func (m *awsEc2query_deserializeOpDescribeHostReservationOfferings) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeHostReservationOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeExportImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -25964,14 +25575,14 @@ func awsEc2query_deserializeOpErrorDescribeHostReservationOfferings(response *sm } } -type 
awsEc2query_deserializeOpDescribeHostReservations struct { +type awsEc2query_deserializeOpDescribeExportTasks struct { } -func (*awsEc2query_deserializeOpDescribeHostReservations) ID() string { +func (*awsEc2query_deserializeOpDescribeExportTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeHostReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeExportTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -25989,9 +25600,9 @@ func (m *awsEc2query_deserializeOpDescribeHostReservations) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeHostReservations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeExportTasks(response, &metadata) } - output := &DescribeHostReservationsOutput{} + output := &DescribeExportTasksOutput{} out.Result = output var buff [1024]byte @@ -26012,7 +25623,7 @@ func (m *awsEc2query_deserializeOpDescribeHostReservations) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeHostReservationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeExportTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26026,7 +25637,7 @@ func (m *awsEc2query_deserializeOpDescribeHostReservations) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeHostReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeExportTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26059,14 +25670,14 @@ func awsEc2query_deserializeOpErrorDescribeHostReservations(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeHosts struct { +type awsEc2query_deserializeOpDescribeFastLaunchImages struct { } -func (*awsEc2query_deserializeOpDescribeHosts) ID() string { +func (*awsEc2query_deserializeOpDescribeFastLaunchImages) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFastLaunchImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26084,9 +25695,9 @@ func (m *awsEc2query_deserializeOpDescribeHosts) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeHosts(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFastLaunchImages(response, &metadata) } - output := &DescribeHostsOutput{} + output := &DescribeFastLaunchImagesOutput{} 
out.Result = output var buff [1024]byte @@ -26107,7 +25718,7 @@ func (m *awsEc2query_deserializeOpDescribeHosts) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeHostsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFastLaunchImagesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26121,7 +25732,7 @@ func (m *awsEc2query_deserializeOpDescribeHosts) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFastLaunchImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26154,14 +25765,14 @@ func awsEc2query_deserializeOpErrorDescribeHosts(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations struct { +type awsEc2query_deserializeOpDescribeFastSnapshotRestores struct { } -func (*awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeFastSnapshotRestores) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26179,9 +25790,9 @@ func (m *awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) Handle } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIamInstanceProfileAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFastSnapshotRestores(response, &metadata) } - output := &DescribeIamInstanceProfileAssociationsOutput{} + output := &DescribeFastSnapshotRestoresOutput{} out.Result = output var buff [1024]byte @@ -26202,7 +25813,7 @@ func (m *awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) Handle } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIamInstanceProfileAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFastSnapshotRestoresOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26216,7 +25827,7 @@ func (m *awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) Handle return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIamInstanceProfileAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26249,14 
+25860,14 @@ func awsEc2query_deserializeOpErrorDescribeIamInstanceProfileAssociations(respon } } -type awsEc2query_deserializeOpDescribeIdentityIdFormat struct { +type awsEc2query_deserializeOpDescribeFleetHistory struct { } -func (*awsEc2query_deserializeOpDescribeIdentityIdFormat) ID() string { +func (*awsEc2query_deserializeOpDescribeFleetHistory) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIdentityIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFleetHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26274,9 +25885,9 @@ func (m *awsEc2query_deserializeOpDescribeIdentityIdFormat) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIdentityIdFormat(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFleetHistory(response, &metadata) } - output := &DescribeIdentityIdFormatOutput{} + output := &DescribeFleetHistoryOutput{} out.Result = output var buff [1024]byte @@ -26297,7 +25908,7 @@ func (m *awsEc2query_deserializeOpDescribeIdentityIdFormat) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIdentityIdFormatOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFleetHistoryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26311,7 +25922,7 @@ func (m *awsEc2query_deserializeOpDescribeIdentityIdFormat) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIdentityIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFleetHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26344,14 +25955,14 @@ func awsEc2query_deserializeOpErrorDescribeIdentityIdFormat(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeIdFormat struct { +type awsEc2query_deserializeOpDescribeFleetInstances struct { } -func (*awsEc2query_deserializeOpDescribeIdFormat) ID() string { +func (*awsEc2query_deserializeOpDescribeFleetInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFleetInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26369,9 +25980,9 @@ func (m *awsEc2query_deserializeOpDescribeIdFormat) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIdFormat(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorDescribeFleetInstances(response, &metadata) } - output := &DescribeIdFormatOutput{} + output := &DescribeFleetInstancesOutput{} out.Result = output var buff [1024]byte @@ -26392,7 +26003,7 @@ func (m *awsEc2query_deserializeOpDescribeIdFormat) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIdFormatOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFleetInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26406,7 +26017,7 @@ func (m *awsEc2query_deserializeOpDescribeIdFormat) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFleetInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26439,14 +26050,14 @@ func awsEc2query_deserializeOpErrorDescribeIdFormat(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDescribeImageAttribute struct { +type awsEc2query_deserializeOpDescribeFleets struct { } -func (*awsEc2query_deserializeOpDescribeImageAttribute) ID() string { +func (*awsEc2query_deserializeOpDescribeFleets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFleets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26464,9 +26075,9 @@ func (m *awsEc2query_deserializeOpDescribeImageAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeImageAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFleets(response, &metadata) } - output := &DescribeImageAttributeOutput{} + output := &DescribeFleetsOutput{} out.Result = output var buff [1024]byte @@ -26487,7 +26098,7 @@ func (m *awsEc2query_deserializeOpDescribeImageAttribute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeImageAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFleetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26501,7 +26112,7 @@ func (m *awsEc2query_deserializeOpDescribeImageAttribute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFleets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26534,14 +26145,14 @@ func 
awsEc2query_deserializeOpErrorDescribeImageAttribute(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeImages struct { +type awsEc2query_deserializeOpDescribeFlowLogs struct { } -func (*awsEc2query_deserializeOpDescribeImages) ID() string { +func (*awsEc2query_deserializeOpDescribeFlowLogs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFlowLogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26559,9 +26170,9 @@ func (m *awsEc2query_deserializeOpDescribeImages) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeImages(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFlowLogs(response, &metadata) } - output := &DescribeImagesOutput{} + output := &DescribeFlowLogsOutput{} out.Result = output var buff [1024]byte @@ -26582,7 +26193,7 @@ func (m *awsEc2query_deserializeOpDescribeImages) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeImagesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFlowLogsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26596,7 +26207,7 @@ func (m *awsEc2query_deserializeOpDescribeImages) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFlowLogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26629,14 +26240,14 @@ func awsEc2query_deserializeOpErrorDescribeImages(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDescribeImportImageTasks struct { +type awsEc2query_deserializeOpDescribeFpgaImageAttribute struct { } -func (*awsEc2query_deserializeOpDescribeImportImageTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeFpgaImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeImportImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26654,9 +26265,9 @@ func (m *awsEc2query_deserializeOpDescribeImportImageTasks) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeImportImageTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFpgaImageAttribute(response, &metadata) } - output := 
&DescribeImportImageTasksOutput{} + output := &DescribeFpgaImageAttributeOutput{} out.Result = output var buff [1024]byte @@ -26677,7 +26288,7 @@ func (m *awsEc2query_deserializeOpDescribeImportImageTasks) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeImportImageTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFpgaImageAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26691,7 +26302,7 @@ func (m *awsEc2query_deserializeOpDescribeImportImageTasks) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeImportImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26724,14 +26335,14 @@ func awsEc2query_deserializeOpErrorDescribeImportImageTasks(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeImportSnapshotTasks struct { +type awsEc2query_deserializeOpDescribeFpgaImages struct { } -func (*awsEc2query_deserializeOpDescribeImportSnapshotTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeFpgaImages) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeImportSnapshotTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeFpgaImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26749,9 +26360,9 @@ func (m *awsEc2query_deserializeOpDescribeImportSnapshotTasks) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeImportSnapshotTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeFpgaImages(response, &metadata) } - output := &DescribeImportSnapshotTasksOutput{} + output := &DescribeFpgaImagesOutput{} out.Result = output var buff [1024]byte @@ -26772,7 +26383,7 @@ func (m *awsEc2query_deserializeOpDescribeImportSnapshotTasks) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeImportSnapshotTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeFpgaImagesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26786,7 +26397,7 @@ func (m *awsEc2query_deserializeOpDescribeImportSnapshotTasks) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeImportSnapshotTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeFpgaImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26819,14 +26430,14 @@ func 
awsEc2query_deserializeOpErrorDescribeImportSnapshotTasks(response *smithyh } } -type awsEc2query_deserializeOpDescribeInstanceAttribute struct { +type awsEc2query_deserializeOpDescribeHostReservationOfferings struct { } -func (*awsEc2query_deserializeOpDescribeInstanceAttribute) ID() string { +func (*awsEc2query_deserializeOpDescribeHostReservationOfferings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeHostReservationOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26844,9 +26455,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceAttribute) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeHostReservationOfferings(response, &metadata) } - output := &DescribeInstanceAttributeOutput{} + output := &DescribeHostReservationOfferingsOutput{} out.Result = output var buff [1024]byte @@ -26867,7 +26478,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceAttribute) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeHostReservationOfferingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26881,7 +26492,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceAttribute) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeHostReservationOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -26914,14 +26525,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceAttribute(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeInstanceConnectEndpoints struct { +type awsEc2query_deserializeOpDescribeHostReservations struct { } -func (*awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) ID() string { +func (*awsEc2query_deserializeOpDescribeHostReservations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeHostReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -26939,9 +26550,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorDescribeInstanceConnectEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeHostReservations(response, &metadata) } - output := &DescribeInstanceConnectEndpointsOutput{} + output := &DescribeHostReservationsOutput{} out.Result = output var buff [1024]byte @@ -26962,7 +26573,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceConnectEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeHostReservationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -26976,7 +26587,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceConnectEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeHostReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27009,14 +26620,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceConnectEndpoints(response *sm } } -type awsEc2query_deserializeOpDescribeInstanceCreditSpecifications struct { +type awsEc2query_deserializeOpDescribeHosts struct { } -func (*awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) ID() string { +func (*awsEc2query_deserializeOpDescribeHosts) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27034,9 +26645,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceCreditSpecifications(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeHosts(response, &metadata) } - output := &DescribeInstanceCreditSpecificationsOutput{} + output := &DescribeHostsOutput{} out.Result = output var buff [1024]byte @@ -27057,7 +26668,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceCreditSpecificationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeHostsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27071,7 +26682,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceCreditSpecifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27104,14 +26715,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceCreditSpecifications(response } } -type awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes struct { +type awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations struct { } -func (*awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) ID() string { +func (*awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIamInstanceProfileAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27129,9 +26740,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceEventNotificationAttributes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIamInstanceProfileAssociations(response, &metadata) } - output := &DescribeInstanceEventNotificationAttributesOutput{} + output := &DescribeIamInstanceProfileAssociationsOutput{} out.Result = output var buff [1024]byte @@ -27152,7 +26763,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceEventNotificationAttributesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIamInstanceProfileAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27166,7 +26777,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) H return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIamInstanceProfileAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27199,14 +26810,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceEventNotificationAttributes(r } } -type awsEc2query_deserializeOpDescribeInstanceEventWindows struct { +type awsEc2query_deserializeOpDescribeIdentityIdFormat struct { } -func (*awsEc2query_deserializeOpDescribeInstanceEventWindows) ID() string { +func (*awsEc2query_deserializeOpDescribeIdentityIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceEventWindows) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIdentityIdFormat) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27224,9 +26835,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventWindows) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceEventWindows(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIdentityIdFormat(response, &metadata) } - output := &DescribeInstanceEventWindowsOutput{} + output := &DescribeIdentityIdFormatOutput{} out.Result = output var buff [1024]byte @@ -27247,7 +26858,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventWindows) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceEventWindowsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIdentityIdFormatOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27261,7 +26872,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceEventWindows) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceEventWindows(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIdentityIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27294,14 +26905,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceEventWindows(response *smithy } } -type awsEc2query_deserializeOpDescribeInstanceImageMetadata struct { +type awsEc2query_deserializeOpDescribeIdFormat struct { } -func (*awsEc2query_deserializeOpDescribeInstanceImageMetadata) ID() string { +func (*awsEc2query_deserializeOpDescribeIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceImageMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27319,9 +26930,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceImageMetadata) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceImageMetadata(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIdFormat(response, &metadata) } - output := &DescribeInstanceImageMetadataOutput{} + output := &DescribeIdFormatOutput{} out.Result = output var buff [1024]byte @@ -27342,7 +26953,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceImageMetadata) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceImageMetadataOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIdFormatOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27356,7 +26967,7 @@ func (m 
*awsEc2query_deserializeOpDescribeInstanceImageMetadata) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceImageMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27389,14 +27000,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceImageMetadata(response *smith } } -type awsEc2query_deserializeOpDescribeInstances struct { +type awsEc2query_deserializeOpDescribeImageAttribute struct { } -func (*awsEc2query_deserializeOpDescribeInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27414,9 +27025,9 @@ func (m *awsEc2query_deserializeOpDescribeInstances) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImageAttribute(response, &metadata) } - output := &DescribeInstancesOutput{} + output := &DescribeImageAttributeOutput{} out.Result = output var buff [1024]byte @@ -27437,7 +27048,7 @@ func (m *awsEc2query_deserializeOpDescribeInstances) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImageAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27451,7 +27062,7 @@ func (m *awsEc2query_deserializeOpDescribeInstances) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27484,14 +27095,14 @@ func awsEc2query_deserializeOpErrorDescribeInstances(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeInstanceStatus struct { +type awsEc2query_deserializeOpDescribeImageReferences struct { } -func (*awsEc2query_deserializeOpDescribeInstanceStatus) ID() string { +func (*awsEc2query_deserializeOpDescribeImageReferences) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImageReferences) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27509,9 +27120,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceStatus) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImageReferences(response, &metadata) } - output := &DescribeInstanceStatusOutput{} + output := &DescribeImageReferencesOutput{} out.Result = output var buff [1024]byte @@ -27532,7 +27143,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceStatus) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceStatusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImageReferencesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27546,7 +27157,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceStatus) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImageReferences(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27579,14 +27190,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceStatus(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeInstanceTopology struct { +type awsEc2query_deserializeOpDescribeImages struct { } -func (*awsEc2query_deserializeOpDescribeInstanceTopology) ID() string { +func (*awsEc2query_deserializeOpDescribeImages) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceTopology) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27604,9 +27215,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTopology) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTopology(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImages(response, &metadata) } - output := &DescribeInstanceTopologyOutput{} + output := &DescribeImagesOutput{} out.Result = output var buff [1024]byte @@ -27627,7 +27238,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTopology) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceTopologyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImagesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27641,7 +27252,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTopology) 
HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceTopology(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27674,14 +27285,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceTopology(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeInstanceTypeOfferings struct { +type awsEc2query_deserializeOpDescribeImageUsageReportEntries struct { } -func (*awsEc2query_deserializeOpDescribeInstanceTypeOfferings) ID() string { +func (*awsEc2query_deserializeOpDescribeImageUsageReportEntries) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceTypeOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImageUsageReportEntries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27699,9 +27310,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypeOfferings) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTypeOfferings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImageUsageReportEntries(response, &metadata) } - output := &DescribeInstanceTypeOfferingsOutput{} + output := &DescribeImageUsageReportEntriesOutput{} out.Result = output var buff [1024]byte @@ -27722,7 +27333,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypeOfferings) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceTypeOfferingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImageUsageReportEntriesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27736,7 +27347,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypeOfferings) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceTypeOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImageUsageReportEntries(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27769,14 +27380,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceTypeOfferings(response *smith } } -type awsEc2query_deserializeOpDescribeInstanceTypes struct { +type awsEc2query_deserializeOpDescribeImageUsageReports struct { } -func (*awsEc2query_deserializeOpDescribeInstanceTypes) ID() string { +func (*awsEc2query_deserializeOpDescribeImageUsageReports) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInstanceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImageUsageReports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27794,9 +27405,9 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypes) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTypes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImageUsageReports(response, &metadata) } - output := &DescribeInstanceTypesOutput{} + output := &DescribeImageUsageReportsOutput{} out.Result = output var buff [1024]byte @@ -27817,7 +27428,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypes) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInstanceTypesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImageUsageReportsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27831,7 +27442,7 @@ func (m *awsEc2query_deserializeOpDescribeInstanceTypes) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInstanceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImageUsageReports(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27864,14 +27475,14 @@ func awsEc2query_deserializeOpErrorDescribeInstanceTypes(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDescribeInternetGateways struct { +type awsEc2query_deserializeOpDescribeImportImageTasks struct { } -func (*awsEc2query_deserializeOpDescribeInternetGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeImportImageTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeInternetGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImportImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27889,9 +27500,9 @@ func (m *awsEc2query_deserializeOpDescribeInternetGateways) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeInternetGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImportImageTasks(response, &metadata) } - output := &DescribeInternetGatewaysOutput{} + output := &DescribeImportImageTasksOutput{} out.Result = output var buff [1024]byte @@ -27912,7 +27523,7 @@ func (m *awsEc2query_deserializeOpDescribeInternetGateways) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeInternetGatewaysOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDescribeImportImageTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -27926,7 +27537,7 @@ func (m *awsEc2query_deserializeOpDescribeInternetGateways) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeInternetGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImportImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -27959,14 +27570,14 @@ func awsEc2query_deserializeOpErrorDescribeInternetGateways(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeIpamByoasn struct { +type awsEc2query_deserializeOpDescribeImportSnapshotTasks struct { } -func (*awsEc2query_deserializeOpDescribeIpamByoasn) ID() string { +func (*awsEc2query_deserializeOpDescribeImportSnapshotTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeImportSnapshotTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -27984,9 +27595,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamByoasn) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamByoasn(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeImportSnapshotTasks(response, &metadata) } - output := &DescribeIpamByoasnOutput{} + output := &DescribeImportSnapshotTasksOutput{} out.Result = output var buff [1024]byte @@ -28007,7 +27618,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamByoasn) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamByoasnOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeImportSnapshotTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28021,7 +27632,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamByoasn) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeImportSnapshotTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28054,14 +27665,14 @@ func awsEc2query_deserializeOpErrorDescribeIpamByoasn(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens struct { +type awsEc2query_deserializeOpDescribeInstanceAttribute struct { } -func (*awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceAttribute) ID() string { 
return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28079,9 +27690,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamExternalResourceVerificationTokens(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceAttribute(response, &metadata) } - output := &DescribeIpamExternalResourceVerificationTokensOutput{} + output := &DescribeInstanceAttributeOutput{} out.Result = output var buff [1024]byte @@ -28102,7 +27713,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamExternalResourceVerificationTokensOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28116,7 +27727,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamExternalResourceVerificationTokens(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28149,14 +27760,14 @@ func awsEc2query_deserializeOpErrorDescribeIpamExternalResourceVerificationToken } } -type awsEc2query_deserializeOpDescribeIpamPools struct { +type awsEc2query_deserializeOpDescribeInstanceConnectEndpoints struct { } -func (*awsEc2query_deserializeOpDescribeIpamPools) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamPools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceConnectEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28174,9 +27785,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamPools) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamPools(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceConnectEndpoints(response, &metadata) } - output := &DescribeIpamPoolsOutput{} + output := &DescribeInstanceConnectEndpointsOutput{} out.Result = output var buff [1024]byte @@ -28197,7 +27808,7 @@ func 
(m *awsEc2query_deserializeOpDescribeIpamPools) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamPoolsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceConnectEndpointsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28211,7 +27822,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamPools) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamPools(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceConnectEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28244,14 +27855,14 @@ func awsEc2query_deserializeOpErrorDescribeIpamPools(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeIpamResourceDiscoveries struct { +type awsEc2query_deserializeOpDescribeInstanceCreditSpecifications struct { } -func (*awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceCreditSpecifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28269,9 +27880,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveries(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceCreditSpecifications(response, &metadata) } - output := &DescribeIpamResourceDiscoveriesOutput{} + output := &DescribeInstanceCreditSpecificationsOutput{} out.Result = output var buff [1024]byte @@ -28292,7 +27903,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamResourceDiscoveriesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceCreditSpecificationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28306,7 +27917,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveries(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceCreditSpecifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28339,14 +27950,14 @@ func 
awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveries(response *smi } } -type awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations struct { +type awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes struct { } -func (*awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28364,9 +27975,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveryAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceEventNotificationAttributes(response, &metadata) } - output := &DescribeIpamResourceDiscoveryAssociationsOutput{} + output := &DescribeInstanceEventNotificationAttributesOutput{} out.Result = output var buff [1024]byte @@ -28387,7 +27998,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) Han } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamResourceDiscoveryAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceEventNotificationAttributesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28401,7 +28012,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) Han return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveryAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28434,14 +28045,14 @@ func awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveryAssociations(res } } -type awsEc2query_deserializeOpDescribeIpams struct { +type awsEc2query_deserializeOpDescribeInstanceEventWindows struct { } -func (*awsEc2query_deserializeOpDescribeIpams) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceEventWindows) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceEventWindows) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28459,9 +28070,9 @@ func (m 
*awsEc2query_deserializeOpDescribeIpams) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpams(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceEventWindows(response, &metadata) } - output := &DescribeIpamsOutput{} + output := &DescribeInstanceEventWindowsOutput{} out.Result = output var buff [1024]byte @@ -28482,7 +28093,7 @@ func (m *awsEc2query_deserializeOpDescribeIpams) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceEventWindowsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28496,7 +28107,7 @@ func (m *awsEc2query_deserializeOpDescribeIpams) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpams(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceEventWindows(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28529,14 +28140,14 @@ func awsEc2query_deserializeOpErrorDescribeIpams(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpDescribeIpamScopes struct { +type awsEc2query_deserializeOpDescribeInstanceImageMetadata struct { } -func (*awsEc2query_deserializeOpDescribeIpamScopes) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceImageMetadata) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpamScopes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceImageMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28554,9 +28165,9 @@ func (m *awsEc2query_deserializeOpDescribeIpamScopes) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamScopes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceImageMetadata(response, &metadata) } - output := &DescribeIpamScopesOutput{} + output := &DescribeInstanceImageMetadataOutput{} out.Result = output var buff [1024]byte @@ -28577,7 +28188,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamScopes) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpamScopesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceImageMetadataOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28591,7 +28202,7 @@ func (m *awsEc2query_deserializeOpDescribeIpamScopes) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpamScopes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDescribeInstanceImageMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28624,14 +28235,14 @@ func awsEc2query_deserializeOpErrorDescribeIpamScopes(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpDescribeIpv6Pools struct { +type awsEc2query_deserializeOpDescribeInstances struct { } -func (*awsEc2query_deserializeOpDescribeIpv6Pools) ID() string { +func (*awsEc2query_deserializeOpDescribeInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeIpv6Pools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28649,9 +28260,9 @@ func (m *awsEc2query_deserializeOpDescribeIpv6Pools) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeIpv6Pools(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstances(response, &metadata) } - output := &DescribeIpv6PoolsOutput{} + output := &DescribeInstancesOutput{} out.Result = output var buff [1024]byte @@ -28672,7 +28283,7 @@ func (m *awsEc2query_deserializeOpDescribeIpv6Pools) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeIpv6PoolsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28686,7 +28297,7 @@ func (m *awsEc2query_deserializeOpDescribeIpv6Pools) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeIpv6Pools(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28719,14 +28330,14 @@ func awsEc2query_deserializeOpErrorDescribeIpv6Pools(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeKeyPairs struct { +type awsEc2query_deserializeOpDescribeInstanceStatus struct { } -func (*awsEc2query_deserializeOpDescribeKeyPairs) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeKeyPairs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28744,9 +28355,9 @@ func (m 
*awsEc2query_deserializeOpDescribeKeyPairs) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeKeyPairs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceStatus(response, &metadata) } - output := &DescribeKeyPairsOutput{} + output := &DescribeInstanceStatusOutput{} out.Result = output var buff [1024]byte @@ -28767,7 +28378,7 @@ func (m *awsEc2query_deserializeOpDescribeKeyPairs) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeKeyPairsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28781,7 +28392,7 @@ func (m *awsEc2query_deserializeOpDescribeKeyPairs) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeKeyPairs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28814,14 +28425,14 @@ func awsEc2query_deserializeOpErrorDescribeKeyPairs(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDescribeLaunchTemplates struct { +type awsEc2query_deserializeOpDescribeInstanceTopology struct { } -func (*awsEc2query_deserializeOpDescribeLaunchTemplates) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceTopology) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLaunchTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceTopology) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28839,9 +28450,9 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplates) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLaunchTemplates(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTopology(response, &metadata) } - output := &DescribeLaunchTemplatesOutput{} + output := &DescribeInstanceTopologyOutput{} out.Result = output var buff [1024]byte @@ -28862,7 +28473,7 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplates) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLaunchTemplatesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceTopologyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28876,7 +28487,7 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplates) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLaunchTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDescribeInstanceTopology(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -28909,14 +28520,14 @@ func awsEc2query_deserializeOpErrorDescribeLaunchTemplates(response *smithyhttp. } } -type awsEc2query_deserializeOpDescribeLaunchTemplateVersions struct { +type awsEc2query_deserializeOpDescribeInstanceTypeOfferings struct { } -func (*awsEc2query_deserializeOpDescribeLaunchTemplateVersions) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceTypeOfferings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLaunchTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceTypeOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -28934,9 +28545,9 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplateVersions) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLaunchTemplateVersions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTypeOfferings(response, &metadata) } - output := &DescribeLaunchTemplateVersionsOutput{} + output := &DescribeInstanceTypeOfferingsOutput{} out.Result = output var buff [1024]byte @@ -28957,7 +28568,7 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplateVersions) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLaunchTemplateVersionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceTypeOfferingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -28971,7 +28582,7 @@ func (m *awsEc2query_deserializeOpDescribeLaunchTemplateVersions) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLaunchTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceTypeOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29004,14 +28615,14 @@ func awsEc2query_deserializeOpErrorDescribeLaunchTemplateVersions(response *smit } } -type awsEc2query_deserializeOpDescribeLocalGatewayRouteTables struct { +type awsEc2query_deserializeOpDescribeInstanceTypes struct { } -func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) ID() string { +func (*awsEc2query_deserializeOpDescribeInstanceTypes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInstanceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29029,9 +28640,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTables(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInstanceTypes(response, &metadata) } - output := &DescribeLocalGatewayRouteTablesOutput{} + output := &DescribeInstanceTypesOutput{} out.Result = output var buff [1024]byte @@ -29052,7 +28663,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTablesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeInstanceTypesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29066,7 +28677,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInstanceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29099,14 +28710,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTables(response *smi } } -type awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations struct { +type awsEc2query_deserializeOpDescribeInternetGateways struct { } -func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeInternetGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeInternetGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29124,9 +28735,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterface } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeInternetGateways(response, &metadata) } - output := &DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput{} + output := &DescribeInternetGatewaysOutput{} out.Result = output var buff [1024]byte @@ -29147,7 +28758,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterface } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput(&output, 
decoder) + err = awsEc2query_deserializeOpDocumentDescribeInternetGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29161,7 +28772,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterface return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeInternetGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29194,14 +28805,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVirtualInterfac } } -type awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations struct { +type awsEc2query_deserializeOpDescribeIpamByoasn struct { } -func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamByoasn) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29219,9 +28830,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVpcAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamByoasn(response, &metadata) } - output := &DescribeLocalGatewayRouteTableVpcAssociationsOutput{} + output := &DescribeIpamByoasnOutput{} out.Result = output var buff [1024]byte @@ -29242,7 +28853,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamByoasnOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29256,7 +28867,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVpcAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29289,14 +28900,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVpcAssociations } } -type awsEc2query_deserializeOpDescribeLocalGateways struct { +type awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens 
struct { } -func (*awsEc2query_deserializeOpDescribeLocalGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamExternalResourceVerificationTokens) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29314,9 +28925,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGateways) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamExternalResourceVerificationTokens(response, &metadata) } - output := &DescribeLocalGatewaysOutput{} + output := &DescribeIpamExternalResourceVerificationTokensOutput{} out.Result = output var buff [1024]byte @@ -29337,7 +28948,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGateways) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamExternalResourceVerificationTokensOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29351,7 +28962,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGateways) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamExternalResourceVerificationTokens(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29384,14 +28995,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGateways(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups struct { +type awsEc2query_deserializeOpDescribeIpamPools struct { } -func (*awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamPools) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamPools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29409,9 +29020,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaceGroups(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorDescribeIpamPools(response, &metadata) } - output := &DescribeLocalGatewayVirtualInterfaceGroupsOutput{} + output := &DescribeIpamPoolsOutput{} out.Result = output var buff [1024]byte @@ -29432,7 +29043,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamPoolsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29446,7 +29057,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaceGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamPools(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29479,14 +29090,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaceGroups(re } } -type awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces struct { +type awsEc2query_deserializeOpDescribeIpamResourceDiscoveries struct { } -func (*awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29504,9 +29115,9 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaces(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveries(response, &metadata) } - output := &DescribeLocalGatewayVirtualInterfacesOutput{} + output := &DescribeIpamResourceDiscoveriesOutput{} out.Result = output var buff [1024]byte @@ -29527,7 +29138,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfacesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamResourceDiscoveriesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29541,7 +29152,7 @@ func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveries(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29574,14 +29185,14 @@ func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaces(respons } } -type awsEc2query_deserializeOpDescribeLockedSnapshots struct { +type awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations struct { } -func (*awsEc2query_deserializeOpDescribeLockedSnapshots) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeLockedSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamResourceDiscoveryAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29599,9 +29210,9 @@ func (m *awsEc2query_deserializeOpDescribeLockedSnapshots) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveryAssociations(response, &metadata) } - output := &DescribeLockedSnapshotsOutput{} + output := &DescribeIpamResourceDiscoveryAssociationsOutput{} out.Result = output var buff [1024]byte @@ -29622,7 +29233,7 @@ func (m *awsEc2query_deserializeOpDescribeLockedSnapshots) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamResourceDiscoveryAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29636,7 +29247,7 @@ func (m *awsEc2query_deserializeOpDescribeLockedSnapshots) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamResourceDiscoveryAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29669,14 +29280,14 @@ func awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribeMacHosts struct { +type awsEc2query_deserializeOpDescribeIpams struct { } -func (*awsEc2query_deserializeOpDescribeMacHosts) ID() string { +func (*awsEc2query_deserializeOpDescribeIpams) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29694,9 +29305,9 @@ func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeMacHosts(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpams(response, &metadata) } - output := &DescribeMacHostsOutput{} + output := &DescribeIpamsOutput{} out.Result = output var buff [1024]byte @@ -29717,7 +29328,7 @@ func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29731,7 +29342,7 @@ func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeMacHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpams(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29764,14 +29375,14 @@ func awsEc2query_deserializeOpErrorDescribeMacHosts(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDescribeManagedPrefixLists struct { +type awsEc2query_deserializeOpDescribeIpamScopes struct { } -func (*awsEc2query_deserializeOpDescribeManagedPrefixLists) ID() string { +func (*awsEc2query_deserializeOpDescribeIpamScopes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeManagedPrefixLists) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpamScopes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29789,9 +29400,9 @@ func (m *awsEc2query_deserializeOpDescribeManagedPrefixLists) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeManagedPrefixLists(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpamScopes(response, &metadata) } - output := &DescribeManagedPrefixListsOutput{} + output := &DescribeIpamScopesOutput{} out.Result = output var buff [1024]byte @@ -29812,7 +29423,7 
@@ func (m *awsEc2query_deserializeOpDescribeManagedPrefixLists) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeManagedPrefixListsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpamScopesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29826,7 +29437,7 @@ func (m *awsEc2query_deserializeOpDescribeManagedPrefixLists) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeManagedPrefixLists(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpamScopes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29859,14 +29470,14 @@ func awsEc2query_deserializeOpErrorDescribeManagedPrefixLists(response *smithyht } } -type awsEc2query_deserializeOpDescribeMovingAddresses struct { +type awsEc2query_deserializeOpDescribeIpv6Pools struct { } -func (*awsEc2query_deserializeOpDescribeMovingAddresses) ID() string { +func (*awsEc2query_deserializeOpDescribeIpv6Pools) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeMovingAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeIpv6Pools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29884,9 +29495,9 @@ func (m *awsEc2query_deserializeOpDescribeMovingAddresses) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeMovingAddresses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeIpv6Pools(response, &metadata) } - output := &DescribeMovingAddressesOutput{} + output := &DescribeIpv6PoolsOutput{} out.Result = output var buff [1024]byte @@ -29907,7 +29518,7 @@ func (m *awsEc2query_deserializeOpDescribeMovingAddresses) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeMovingAddressesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeIpv6PoolsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -29921,7 +29532,7 @@ func (m *awsEc2query_deserializeOpDescribeMovingAddresses) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeMovingAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeIpv6Pools(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -29954,14 +29565,14 @@ func awsEc2query_deserializeOpErrorDescribeMovingAddresses(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribeNatGateways struct { +type awsEc2query_deserializeOpDescribeKeyPairs struct { } -func (*awsEc2query_deserializeOpDescribeNatGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeKeyPairs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNatGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeKeyPairs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -29979,9 +29590,9 @@ func (m *awsEc2query_deserializeOpDescribeNatGateways) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNatGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeKeyPairs(response, &metadata) } - output := &DescribeNatGatewaysOutput{} + output := &DescribeKeyPairsOutput{} out.Result = output var buff [1024]byte @@ -30002,7 +29613,7 @@ func (m *awsEc2query_deserializeOpDescribeNatGateways) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNatGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeKeyPairsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30016,7 +29627,7 @@ func (m *awsEc2query_deserializeOpDescribeNatGateways) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNatGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeKeyPairs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30049,14 +29660,14 @@ func awsEc2query_deserializeOpErrorDescribeNatGateways(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeNetworkAcls struct { +type awsEc2query_deserializeOpDescribeLaunchTemplates struct { } -func (*awsEc2query_deserializeOpDescribeNetworkAcls) ID() string { +func (*awsEc2query_deserializeOpDescribeLaunchTemplates) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkAcls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLaunchTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30074,9 +29685,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkAcls) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkAcls(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLaunchTemplates(response, &metadata) } - output := &DescribeNetworkAclsOutput{} + output := &DescribeLaunchTemplatesOutput{} out.Result = output var buff 
[1024]byte @@ -30097,7 +29708,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkAcls) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkAclsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLaunchTemplatesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30111,7 +29722,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkAcls) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkAcls(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLaunchTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30144,14 +29755,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkAcls(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses struct { +type awsEc2query_deserializeOpDescribeLaunchTemplateVersions struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) ID() string { +func (*awsEc2query_deserializeOpDescribeLaunchTemplateVersions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLaunchTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30169,9 +29780,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopeAnalyses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLaunchTemplateVersions(response, &metadata) } - output := &DescribeNetworkInsightsAccessScopeAnalysesOutput{} + output := &DescribeLaunchTemplateVersionsOutput{} out.Result = output var buff [1024]byte @@ -30192,7 +29803,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAccessScopeAnalysesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLaunchTemplateVersionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30206,7 +29817,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopeAnalyses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLaunchTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, 
%w", err)} @@ -30239,14 +29850,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopeAnalyses(re } } -type awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes struct { +type awsEc2query_deserializeOpDescribeLocalGatewayRouteTables struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30264,9 +29875,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTables(response, &metadata) } - output := &DescribeNetworkInsightsAccessScopesOutput{} + output := &DescribeLocalGatewayRouteTablesOutput{} out.Result = output var buff [1024]byte @@ -30287,7 +29898,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAccessScopesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTablesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30301,7 +29912,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30334,14 +29945,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopes(response } } -type awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses struct { +type awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -30359,9 +29970,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAnalyses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(response, &metadata) } - output := &DescribeNetworkInsightsAnalysesOutput{} + output := &DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput{} out.Result = output var buff [1024]byte @@ -30382,7 +29993,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAnalysesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30396,7 +30007,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAnalyses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30429,14 +30040,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAnalyses(response *smi } } -type awsEc2query_deserializeOpDescribeNetworkInsightsPaths struct { +type awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInsightsPaths) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInsightsPaths) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGatewayRouteTableVpcAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30454,9 +30065,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsPaths) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsPaths(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVpcAssociations(response, &metadata) } - output := &DescribeNetworkInsightsPathsOutput{} + output := &DescribeLocalGatewayRouteTableVpcAssociationsOutput{} out.Result = output var buff [1024]byte @@ -30477,7 +30088,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsPaths) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsPathsOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30491,7 +30102,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInsightsPaths) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInsightsPaths(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGatewayRouteTableVpcAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30524,14 +30135,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInsightsPaths(response *smithy } } -type awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute struct { +type awsEc2query_deserializeOpDescribeLocalGateways struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30549,9 +30160,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfaceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGateways(response, &metadata) } - output := &DescribeNetworkInterfaceAttributeOutput{} + output := &DescribeLocalGatewaysOutput{} out.Result = output var buff [1024]byte @@ -30572,7 +30183,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfaceAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30586,7 +30197,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30619,14 +30230,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInterfaceAttribute(response *s } } -type awsEc2query_deserializeOpDescribeNetworkInterfacePermissions struct { +type awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups struct { } -func 
(*awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaceGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30644,9 +30255,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfacePermissions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaceGroups(response, &metadata) } - output := &DescribeNetworkInterfacePermissionsOutput{} + output := &DescribeLocalGatewayVirtualInterfaceGroupsOutput{} out.Result = output var buff [1024]byte @@ -30667,7 +30278,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfacePermissionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30681,7 +30292,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInterfacePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaceGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30714,14 +30325,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInterfacePermissions(response } } -type awsEc2query_deserializeOpDescribeNetworkInterfaces struct { +type awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces struct { } -func (*awsEc2query_deserializeOpDescribeNetworkInterfaces) ID() string { +func (*awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeNetworkInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLocalGatewayVirtualInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30739,9 +30350,9 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaces) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfaces(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaces(response, &metadata) } - output := &DescribeNetworkInterfacesOutput{} + output := &DescribeLocalGatewayVirtualInterfacesOutput{} out.Result = output var buff [1024]byte @@ -30762,7 +30373,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaces) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfacesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfacesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30776,7 +30387,7 @@ func (m *awsEc2query_deserializeOpDescribeNetworkInterfaces) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeNetworkInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLocalGatewayVirtualInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30809,14 +30420,14 @@ func awsEc2query_deserializeOpErrorDescribeNetworkInterfaces(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeOutpostLags struct { +type awsEc2query_deserializeOpDescribeLockedSnapshots struct { } -func (*awsEc2query_deserializeOpDescribeOutpostLags) ID() string { +func (*awsEc2query_deserializeOpDescribeLockedSnapshots) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeOutpostLags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeLockedSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30834,9 +30445,9 @@ func (m *awsEc2query_deserializeOpDescribeOutpostLags) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeOutpostLags(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response, &metadata) } - output := &DescribeOutpostLagsOutput{} + output := &DescribeLockedSnapshotsOutput{} out.Result = output var buff [1024]byte @@ -30857,7 +30468,7 @@ func (m *awsEc2query_deserializeOpDescribeOutpostLags) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeOutpostLagsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30871,7 +30482,7 @@ func (m *awsEc2query_deserializeOpDescribeOutpostLags) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeOutpostLags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeLockedSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30904,14 +30515,14 @@ func awsEc2query_deserializeOpErrorDescribeOutpostLags(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribePlacementGroups struct { +type awsEc2query_deserializeOpDescribeMacHosts struct { } -func (*awsEc2query_deserializeOpDescribePlacementGroups) ID() string { +func (*awsEc2query_deserializeOpDescribeMacHosts) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribePlacementGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeMacHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -30929,9 +30540,9 @@ func (m *awsEc2query_deserializeOpDescribePlacementGroups) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribePlacementGroups(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeMacHosts(response, &metadata) } - output := &DescribePlacementGroupsOutput{} + output := &DescribeMacHostsOutput{} out.Result = output var buff [1024]byte @@ -30952,7 +30563,7 @@ func (m *awsEc2query_deserializeOpDescribePlacementGroups) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribePlacementGroupsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -30966,7 +30577,7 @@ func (m *awsEc2query_deserializeOpDescribePlacementGroups) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribePlacementGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeMacHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -30999,14 +30610,14 @@ func awsEc2query_deserializeOpErrorDescribePlacementGroups(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribePrefixLists struct { +type awsEc2query_deserializeOpDescribeMacModificationTasks struct { } -func (*awsEc2query_deserializeOpDescribePrefixLists) ID() string { +func (*awsEc2query_deserializeOpDescribeMacModificationTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribePrefixLists) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeMacModificationTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31024,9 +30635,9 @@ func (m *awsEc2query_deserializeOpDescribePrefixLists) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribePrefixLists(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeMacModificationTasks(response, &metadata) } - output := &DescribePrefixListsOutput{} + output := &DescribeMacModificationTasksOutput{} out.Result = output var buff [1024]byte @@ -31047,7 +30658,7 @@ func (m *awsEc2query_deserializeOpDescribePrefixLists) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribePrefixListsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeMacModificationTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31061,7 +30672,7 @@ func (m *awsEc2query_deserializeOpDescribePrefixLists) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribePrefixLists(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeMacModificationTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31094,14 +30705,14 @@ func awsEc2query_deserializeOpErrorDescribePrefixLists(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribePrincipalIdFormat struct { +type awsEc2query_deserializeOpDescribeManagedPrefixLists struct { } -func (*awsEc2query_deserializeOpDescribePrincipalIdFormat) ID() string { +func (*awsEc2query_deserializeOpDescribeManagedPrefixLists) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribePrincipalIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeManagedPrefixLists) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31119,9 +30730,9 @@ func (m *awsEc2query_deserializeOpDescribePrincipalIdFormat) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribePrincipalIdFormat(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeManagedPrefixLists(response, 
&metadata) } - output := &DescribePrincipalIdFormatOutput{} + output := &DescribeManagedPrefixListsOutput{} out.Result = output var buff [1024]byte @@ -31142,7 +30753,7 @@ func (m *awsEc2query_deserializeOpDescribePrincipalIdFormat) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribePrincipalIdFormatOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeManagedPrefixListsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31156,7 +30767,7 @@ func (m *awsEc2query_deserializeOpDescribePrincipalIdFormat) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribePrincipalIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeManagedPrefixLists(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31189,14 +30800,14 @@ func awsEc2query_deserializeOpErrorDescribePrincipalIdFormat(response *smithyhtt } } -type awsEc2query_deserializeOpDescribePublicIpv4Pools struct { +type awsEc2query_deserializeOpDescribeMovingAddresses struct { } -func (*awsEc2query_deserializeOpDescribePublicIpv4Pools) ID() string { +func (*awsEc2query_deserializeOpDescribeMovingAddresses) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribePublicIpv4Pools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeMovingAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31214,9 +30825,9 @@ func (m *awsEc2query_deserializeOpDescribePublicIpv4Pools) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribePublicIpv4Pools(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeMovingAddresses(response, &metadata) } - output := &DescribePublicIpv4PoolsOutput{} + output := &DescribeMovingAddressesOutput{} out.Result = output var buff [1024]byte @@ -31237,7 +30848,7 @@ func (m *awsEc2query_deserializeOpDescribePublicIpv4Pools) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribePublicIpv4PoolsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeMovingAddressesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31251,7 +30862,7 @@ func (m *awsEc2query_deserializeOpDescribePublicIpv4Pools) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribePublicIpv4Pools(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeMovingAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31284,14 
+30895,14 @@ func awsEc2query_deserializeOpErrorDescribePublicIpv4Pools(response *smithyhttp. } } -type awsEc2query_deserializeOpDescribeRegions struct { +type awsEc2query_deserializeOpDescribeNatGateways struct { } -func (*awsEc2query_deserializeOpDescribeRegions) ID() string { +func (*awsEc2query_deserializeOpDescribeNatGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeRegions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNatGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31309,9 +30920,9 @@ func (m *awsEc2query_deserializeOpDescribeRegions) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeRegions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNatGateways(response, &metadata) } - output := &DescribeRegionsOutput{} + output := &DescribeNatGatewaysOutput{} out.Result = output var buff [1024]byte @@ -31332,7 +30943,7 @@ func (m *awsEc2query_deserializeOpDescribeRegions) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeRegionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNatGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31346,7 +30957,7 @@ func (m *awsEc2query_deserializeOpDescribeRegions) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeRegions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNatGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31379,14 +30990,14 @@ func awsEc2query_deserializeOpErrorDescribeRegions(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks struct { +type awsEc2query_deserializeOpDescribeNetworkAcls struct { } -func (*awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkAcls) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkAcls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31404,9 +31015,9 @@ func (m *awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeReplaceRootVolumeTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkAcls(response, &metadata) } 
- output := &DescribeReplaceRootVolumeTasksOutput{} + output := &DescribeNetworkAclsOutput{} out.Result = output var buff [1024]byte @@ -31427,7 +31038,7 @@ func (m *awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeReplaceRootVolumeTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkAclsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31441,7 +31052,7 @@ func (m *awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeReplaceRootVolumeTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkAcls(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31474,14 +31085,14 @@ func awsEc2query_deserializeOpErrorDescribeReplaceRootVolumeTasks(response *smit } } -type awsEc2query_deserializeOpDescribeReservedInstances struct { +type awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses struct { } -func (*awsEc2query_deserializeOpDescribeReservedInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopeAnalyses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31499,9 +31110,9 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstances) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopeAnalyses(response, &metadata) } - output := &DescribeReservedInstancesOutput{} + output := &DescribeNetworkInsightsAccessScopeAnalysesOutput{} out.Result = output var buff [1024]byte @@ -31522,7 +31133,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstances) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAccessScopeAnalysesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31536,7 +31147,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstances) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopeAnalyses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); 
err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31569,14 +31180,14 @@ func awsEc2query_deserializeOpErrorDescribeReservedInstances(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeReservedInstancesListings struct { +type awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes struct { } -func (*awsEc2query_deserializeOpDescribeReservedInstancesListings) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeReservedInstancesListings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAccessScopes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31594,9 +31205,9 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesListings) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesListings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopes(response, &metadata) } - output := &DescribeReservedInstancesListingsOutput{} + output := &DescribeNetworkInsightsAccessScopesOutput{} out.Result = output var buff [1024]byte @@ -31617,7 +31228,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesListings) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesListingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAccessScopesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31631,7 +31242,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesListings) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeReservedInstancesListings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAccessScopes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31664,14 +31275,14 @@ func awsEc2query_deserializeOpErrorDescribeReservedInstancesListings(response *s } } -type awsEc2query_deserializeOpDescribeReservedInstancesModifications struct { +type awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses struct { } -func (*awsEc2query_deserializeOpDescribeReservedInstancesModifications) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeReservedInstancesModifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInsightsAnalyses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { 
out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31689,9 +31300,9 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesModifications) Handle } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesModifications(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsAnalyses(response, &metadata) } - output := &DescribeReservedInstancesModificationsOutput{} + output := &DescribeNetworkInsightsAnalysesOutput{} out.Result = output var buff [1024]byte @@ -31712,7 +31323,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesModifications) Handle } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesModificationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsAnalysesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31726,7 +31337,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesModifications) Handle return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeReservedInstancesModifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInsightsAnalyses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31759,14 +31370,14 @@ func awsEc2query_deserializeOpErrorDescribeReservedInstancesModifications(respon } } -type awsEc2query_deserializeOpDescribeReservedInstancesOfferings struct { +type awsEc2query_deserializeOpDescribeNetworkInsightsPaths struct { } -func (*awsEc2query_deserializeOpDescribeReservedInstancesOfferings) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInsightsPaths) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeReservedInstancesOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInsightsPaths) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31784,9 +31395,9 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesOfferings) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesOfferings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInsightsPaths(response, &metadata) } - output := &DescribeReservedInstancesOfferingsOutput{} + output := &DescribeNetworkInsightsPathsOutput{} out.Result = output var buff [1024]byte @@ -31807,7 +31418,7 @@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesOfferings) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesOfferingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInsightsPathsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31821,7 +31432,7 
@@ func (m *awsEc2query_deserializeOpDescribeReservedInstancesOfferings) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeReservedInstancesOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInsightsPaths(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31854,14 +31465,14 @@ func awsEc2query_deserializeOpErrorDescribeReservedInstancesOfferings(response * } } -type awsEc2query_deserializeOpDescribeRouteServerEndpoints struct { +type awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute struct { } -func (*awsEc2query_deserializeOpDescribeRouteServerEndpoints) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeRouteServerEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31879,9 +31490,9 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerEndpoints) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServerEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfaceAttribute(response, &metadata) } - output := &DescribeRouteServerEndpointsOutput{} + output := &DescribeNetworkInterfaceAttributeOutput{} out.Result = output var buff [1024]byte @@ -31902,7 +31513,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerEndpoints) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeRouteServerEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfaceAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -31916,7 +31527,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerEndpoints) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeRouteServerEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -31949,14 +31560,14 @@ func awsEc2query_deserializeOpErrorDescribeRouteServerEndpoints(response *smithy } } -type awsEc2query_deserializeOpDescribeRouteServerPeers struct { +type awsEc2query_deserializeOpDescribeNetworkInterfacePermissions struct { } -func (*awsEc2query_deserializeOpDescribeRouteServerPeers) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDescribeRouteServerPeers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInterfacePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -31974,9 +31585,9 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerPeers) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServerPeers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfacePermissions(response, &metadata) } - output := &DescribeRouteServerPeersOutput{} + output := &DescribeNetworkInterfacePermissionsOutput{} out.Result = output var buff [1024]byte @@ -31997,7 +31608,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerPeers) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeRouteServerPeersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfacePermissionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32011,7 +31622,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServerPeers) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeRouteServerPeers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInterfacePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32044,14 +31655,14 @@ func awsEc2query_deserializeOpErrorDescribeRouteServerPeers(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeRouteServers struct { +type awsEc2query_deserializeOpDescribeNetworkInterfaces struct { } -func (*awsEc2query_deserializeOpDescribeRouteServers) ID() string { +func (*awsEc2query_deserializeOpDescribeNetworkInterfaces) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeRouteServers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeNetworkInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32069,9 +31680,9 @@ func (m *awsEc2query_deserializeOpDescribeRouteServers) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeNetworkInterfaces(response, &metadata) } - output := &DescribeRouteServersOutput{} + output := &DescribeNetworkInterfacesOutput{} out.Result = output var buff [1024]byte @@ -32092,7 +31703,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServers) HandleDeserialize(ctx co } decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeRouteServersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeNetworkInterfacesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32106,7 +31717,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteServers) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeRouteServers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeNetworkInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32139,14 +31750,14 @@ func awsEc2query_deserializeOpErrorDescribeRouteServers(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDescribeRouteTables struct { +type awsEc2query_deserializeOpDescribeOutpostLags struct { } -func (*awsEc2query_deserializeOpDescribeRouteTables) ID() string { +func (*awsEc2query_deserializeOpDescribeOutpostLags) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeOutpostLags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32164,9 +31775,9 @@ func (m *awsEc2query_deserializeOpDescribeRouteTables) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteTables(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeOutpostLags(response, &metadata) } - output := &DescribeRouteTablesOutput{} + output := &DescribeOutpostLagsOutput{} out.Result = output var buff [1024]byte @@ -32187,7 +31798,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteTables) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeRouteTablesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeOutpostLagsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32201,7 +31812,7 @@ func (m *awsEc2query_deserializeOpDescribeRouteTables) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeOutpostLags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32234,14 +31845,14 @@ func awsEc2query_deserializeOpErrorDescribeRouteTables(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDescribeScheduledInstanceAvailability struct { +type awsEc2query_deserializeOpDescribePlacementGroups struct { } -func (*awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) ID() string { +func 
(*awsEc2query_deserializeOpDescribePlacementGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribePlacementGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32259,9 +31870,9 @@ func (m *awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeScheduledInstanceAvailability(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribePlacementGroups(response, &metadata) } - output := &DescribeScheduledInstanceAvailabilityOutput{} + output := &DescribePlacementGroupsOutput{} out.Result = output var buff [1024]byte @@ -32282,7 +31893,7 @@ func (m *awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeScheduledInstanceAvailabilityOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribePlacementGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32296,7 +31907,7 @@ func (m *awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeScheduledInstanceAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribePlacementGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32329,14 +31940,14 @@ func awsEc2query_deserializeOpErrorDescribeScheduledInstanceAvailability(respons } } -type awsEc2query_deserializeOpDescribeScheduledInstances struct { +type awsEc2query_deserializeOpDescribePrefixLists struct { } -func (*awsEc2query_deserializeOpDescribeScheduledInstances) ID() string { +func (*awsEc2query_deserializeOpDescribePrefixLists) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribePrefixLists) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32354,9 +31965,9 @@ func (m *awsEc2query_deserializeOpDescribeScheduledInstances) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeScheduledInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribePrefixLists(response, &metadata) } - output := &DescribeScheduledInstancesOutput{} + output := &DescribePrefixListsOutput{} out.Result = output var buff [1024]byte @@ -32377,7 +31988,7 @@ func (m 
*awsEc2query_deserializeOpDescribeScheduledInstances) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeScheduledInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribePrefixListsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32391,7 +32002,7 @@ func (m *awsEc2query_deserializeOpDescribeScheduledInstances) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribePrefixLists(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32424,14 +32035,14 @@ func awsEc2query_deserializeOpErrorDescribeScheduledInstances(response *smithyht } } -type awsEc2query_deserializeOpDescribeSecurityGroupReferences struct { +type awsEc2query_deserializeOpDescribePrincipalIdFormat struct { } -func (*awsEc2query_deserializeOpDescribeSecurityGroupReferences) ID() string { +func (*awsEc2query_deserializeOpDescribePrincipalIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSecurityGroupReferences) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribePrincipalIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32449,9 +32060,9 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupReferences) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupReferences(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribePrincipalIdFormat(response, &metadata) } - output := &DescribeSecurityGroupReferencesOutput{} + output := &DescribePrincipalIdFormatOutput{} out.Result = output var buff [1024]byte @@ -32472,7 +32083,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupReferences) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupReferencesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribePrincipalIdFormatOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32486,7 +32097,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupReferences) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSecurityGroupReferences(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribePrincipalIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32519,14 +32130,14 @@ func awsEc2query_deserializeOpErrorDescribeSecurityGroupReferences(response *smi } } -type 
awsEc2query_deserializeOpDescribeSecurityGroupRules struct { +type awsEc2query_deserializeOpDescribePublicIpv4Pools struct { } -func (*awsEc2query_deserializeOpDescribeSecurityGroupRules) ID() string { +func (*awsEc2query_deserializeOpDescribePublicIpv4Pools) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSecurityGroupRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribePublicIpv4Pools) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32544,9 +32155,9 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupRules) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupRules(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribePublicIpv4Pools(response, &metadata) } - output := &DescribeSecurityGroupRulesOutput{} + output := &DescribePublicIpv4PoolsOutput{} out.Result = output var buff [1024]byte @@ -32567,7 +32178,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupRules) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupRulesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribePublicIpv4PoolsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32581,7 +32192,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupRules) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSecurityGroupRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribePublicIpv4Pools(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32614,14 +32225,14 @@ func awsEc2query_deserializeOpErrorDescribeSecurityGroupRules(response *smithyht } } -type awsEc2query_deserializeOpDescribeSecurityGroups struct { +type awsEc2query_deserializeOpDescribeRegions struct { } -func (*awsEc2query_deserializeOpDescribeSecurityGroups) ID() string { +func (*awsEc2query_deserializeOpDescribeRegions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSecurityGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeRegions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32639,9 +32250,9 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroups) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroups(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeRegions(response, &metadata) } - output := &DescribeSecurityGroupsOutput{} + 
output := &DescribeRegionsOutput{} out.Result = output var buff [1024]byte @@ -32662,7 +32273,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroups) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeRegionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32676,7 +32287,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroups) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSecurityGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeRegions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32709,14 +32320,14 @@ func awsEc2query_deserializeOpErrorDescribeSecurityGroups(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations struct { +type awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks struct { } -func (*awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeReplaceRootVolumeTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32734,9 +32345,9 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupVpcAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeReplaceRootVolumeTasks(response, &metadata) } - output := &DescribeSecurityGroupVpcAssociationsOutput{} + output := &DescribeReplaceRootVolumeTasksOutput{} out.Result = output var buff [1024]byte @@ -32757,7 +32368,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupVpcAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeReplaceRootVolumeTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32771,7 +32382,7 @@ func (m *awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSecurityGroupVpcAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeReplaceRootVolumeTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error 
response body, %w", err)} @@ -32804,14 +32415,14 @@ func awsEc2query_deserializeOpErrorDescribeSecurityGroupVpcAssociations(response } } -type awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces struct { +type awsEc2query_deserializeOpDescribeReservedInstances struct { } -func (*awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) ID() string { +func (*awsEc2query_deserializeOpDescribeReservedInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32829,9 +32440,9 @@ func (m *awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeServiceLinkVirtualInterfaces(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstances(response, &metadata) } - output := &DescribeServiceLinkVirtualInterfacesOutput{} + output := &DescribeReservedInstancesOutput{} out.Result = output var buff [1024]byte @@ -32852,7 +32463,7 @@ func (m *awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeServiceLinkVirtualInterfacesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32866,7 +32477,7 @@ func (m *awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeServiceLinkVirtualInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32899,14 +32510,14 @@ func awsEc2query_deserializeOpErrorDescribeServiceLinkVirtualInterfaces(response } } -type awsEc2query_deserializeOpDescribeSnapshotAttribute struct { +type awsEc2query_deserializeOpDescribeReservedInstancesListings struct { } -func (*awsEc2query_deserializeOpDescribeSnapshotAttribute) ID() string { +func (*awsEc2query_deserializeOpDescribeReservedInstancesListings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeReservedInstancesListings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -32924,9 +32535,9 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotAttribute) HandleDeserialize(c } if 
response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshotAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesListings(response, &metadata) } - output := &DescribeSnapshotAttributeOutput{} + output := &DescribeReservedInstancesListingsOutput{} out.Result = output var buff [1024]byte @@ -32947,7 +32558,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotAttribute) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSnapshotAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesListingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -32961,7 +32572,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotAttribute) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeReservedInstancesListings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -32994,14 +32605,14 @@ func awsEc2query_deserializeOpErrorDescribeSnapshotAttribute(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeSnapshots struct { +type awsEc2query_deserializeOpDescribeReservedInstancesModifications struct { } -func (*awsEc2query_deserializeOpDescribeSnapshots) ID() string { +func (*awsEc2query_deserializeOpDescribeReservedInstancesModifications) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeReservedInstancesModifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33019,9 +32630,9 @@ func (m *awsEc2query_deserializeOpDescribeSnapshots) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshots(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesModifications(response, &metadata) } - output := &DescribeSnapshotsOutput{} + output := &DescribeReservedInstancesModificationsOutput{} out.Result = output var buff [1024]byte @@ -33042,7 +32653,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshots) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSnapshotsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesModificationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33056,7 +32667,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshots) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDescribeReservedInstancesModifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33089,14 +32700,14 @@ func awsEc2query_deserializeOpErrorDescribeSnapshots(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDescribeSnapshotTierStatus struct { +type awsEc2query_deserializeOpDescribeReservedInstancesOfferings struct { } -func (*awsEc2query_deserializeOpDescribeSnapshotTierStatus) ID() string { +func (*awsEc2query_deserializeOpDescribeReservedInstancesOfferings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSnapshotTierStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeReservedInstancesOfferings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33114,9 +32725,9 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotTierStatus) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshotTierStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeReservedInstancesOfferings(response, &metadata) } - output := &DescribeSnapshotTierStatusOutput{} + output := &DescribeReservedInstancesOfferingsOutput{} out.Result = output var buff [1024]byte @@ -33137,7 +32748,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotTierStatus) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSnapshotTierStatusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeReservedInstancesOfferingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33151,7 +32762,7 @@ func (m *awsEc2query_deserializeOpDescribeSnapshotTierStatus) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSnapshotTierStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeReservedInstancesOfferings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33184,14 +32795,14 @@ func awsEc2query_deserializeOpErrorDescribeSnapshotTierStatus(response *smithyht } } -type awsEc2query_deserializeOpDescribeSpotDatafeedSubscription struct { +type awsEc2query_deserializeOpDescribeRouteServerEndpoints struct { } -func (*awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) ID() string { +func (*awsEc2query_deserializeOpDescribeRouteServerEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeRouteServerEndpoints) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33209,9 +32820,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotDatafeedSubscription(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServerEndpoints(response, &metadata) } - output := &DescribeSpotDatafeedSubscriptionOutput{} + output := &DescribeRouteServerEndpointsOutput{} out.Result = output var buff [1024]byte @@ -33232,7 +32843,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSpotDatafeedSubscriptionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeRouteServerEndpointsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33246,7 +32857,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeRouteServerEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33279,14 +32890,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotDatafeedSubscription(response *sm } } -type awsEc2query_deserializeOpDescribeSpotFleetInstances struct { +type awsEc2query_deserializeOpDescribeRouteServerPeers struct { } -func (*awsEc2query_deserializeOpDescribeSpotFleetInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeRouteServerPeers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotFleetInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeRouteServerPeers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33304,9 +32915,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetInstances) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServerPeers(response, &metadata) } - output := &DescribeSpotFleetInstancesOutput{} + output := &DescribeRouteServerPeersOutput{} out.Result = output var buff [1024]byte @@ -33327,7 +32938,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetInstances) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSpotFleetInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeRouteServerPeersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer 
io.Copy(&snapshot, ringBuffer) @@ -33341,7 +32952,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetInstances) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotFleetInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeRouteServerPeers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33374,14 +32985,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotFleetInstances(response *smithyht } } -type awsEc2query_deserializeOpDescribeSpotFleetRequestHistory struct { +type awsEc2query_deserializeOpDescribeRouteServers struct { } -func (*awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) ID() string { +func (*awsEc2query_deserializeOpDescribeRouteServers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeRouteServers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33399,9 +33010,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetRequestHistory(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteServers(response, &metadata) } - output := &DescribeSpotFleetRequestHistoryOutput{} + output := &DescribeRouteServersOutput{} out.Result = output var buff [1024]byte @@ -33422,7 +33033,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSpotFleetRequestHistoryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeRouteServersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33436,7 +33047,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotFleetRequestHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeRouteServers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33469,14 +33080,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotFleetRequestHistory(response *smi } } -type awsEc2query_deserializeOpDescribeSpotFleetRequests struct { +type awsEc2query_deserializeOpDescribeRouteTables struct { } -func (*awsEc2query_deserializeOpDescribeSpotFleetRequests) ID() string { +func (*awsEc2query_deserializeOpDescribeRouteTables) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotFleetRequests) HandleDeserialize(ctx context.Context, 
in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33494,9 +33105,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequests) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetRequests(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeRouteTables(response, &metadata) } - output := &DescribeSpotFleetRequestsOutput{} + output := &DescribeRouteTablesOutput{} out.Result = output var buff [1024]byte @@ -33517,7 +33128,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequests) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSpotFleetRequestsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeRouteTablesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33531,7 +33142,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotFleetRequests) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotFleetRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33564,14 +33175,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotFleetRequests(response *smithyhtt } } -type awsEc2query_deserializeOpDescribeSpotInstanceRequests struct { +type awsEc2query_deserializeOpDescribeScheduledInstanceAvailability struct { } -func (*awsEc2query_deserializeOpDescribeSpotInstanceRequests) ID() string { +func (*awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotInstanceRequests) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeScheduledInstanceAvailability) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33589,9 +33200,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotInstanceRequests) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotInstanceRequests(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeScheduledInstanceAvailability(response, &metadata) } - output := &DescribeSpotInstanceRequestsOutput{} + output := &DescribeScheduledInstanceAvailabilityOutput{} out.Result = output var buff [1024]byte @@ -33612,7 +33223,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotInstanceRequests) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDescribeSpotInstanceRequestsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeScheduledInstanceAvailabilityOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33626,7 +33237,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotInstanceRequests) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotInstanceRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeScheduledInstanceAvailability(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33659,14 +33270,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotInstanceRequests(response *smithy } } -type awsEc2query_deserializeOpDescribeSpotPriceHistory struct { +type awsEc2query_deserializeOpDescribeScheduledInstances struct { } -func (*awsEc2query_deserializeOpDescribeSpotPriceHistory) ID() string { +func (*awsEc2query_deserializeOpDescribeScheduledInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSpotPriceHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33684,9 +33295,9 @@ func (m *awsEc2query_deserializeOpDescribeSpotPriceHistory) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotPriceHistory(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeScheduledInstances(response, &metadata) } - output := &DescribeSpotPriceHistoryOutput{} + output := &DescribeScheduledInstancesOutput{} out.Result = output var buff [1024]byte @@ -33707,7 +33318,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotPriceHistory) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSpotPriceHistoryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeScheduledInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33721,7 +33332,7 @@ func (m *awsEc2query_deserializeOpDescribeSpotPriceHistory) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSpotPriceHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33754,14 +33365,14 @@ func awsEc2query_deserializeOpErrorDescribeSpotPriceHistory(response *smithyhttp } } -type awsEc2query_deserializeOpDescribeStaleSecurityGroups struct { +type awsEc2query_deserializeOpDescribeSecurityGroupReferences struct { } -func 
(*awsEc2query_deserializeOpDescribeStaleSecurityGroups) ID() string { +func (*awsEc2query_deserializeOpDescribeSecurityGroupReferences) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeStaleSecurityGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSecurityGroupReferences) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33779,9 +33390,9 @@ func (m *awsEc2query_deserializeOpDescribeStaleSecurityGroups) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeStaleSecurityGroups(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupReferences(response, &metadata) } - output := &DescribeStaleSecurityGroupsOutput{} + output := &DescribeSecurityGroupReferencesOutput{} out.Result = output var buff [1024]byte @@ -33802,7 +33413,7 @@ func (m *awsEc2query_deserializeOpDescribeStaleSecurityGroups) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeStaleSecurityGroupsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupReferencesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33816,7 +33427,7 @@ func (m *awsEc2query_deserializeOpDescribeStaleSecurityGroups) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeStaleSecurityGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSecurityGroupReferences(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33849,14 +33460,14 @@ func awsEc2query_deserializeOpErrorDescribeStaleSecurityGroups(response *smithyh } } -type awsEc2query_deserializeOpDescribeStoreImageTasks struct { +type awsEc2query_deserializeOpDescribeSecurityGroupRules struct { } -func (*awsEc2query_deserializeOpDescribeStoreImageTasks) ID() string { +func (*awsEc2query_deserializeOpDescribeSecurityGroupRules) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeStoreImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSecurityGroupRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33874,9 +33485,9 @@ func (m *awsEc2query_deserializeOpDescribeStoreImageTasks) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeStoreImageTasks(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupRules(response, &metadata) } - output := &DescribeStoreImageTasksOutput{} + output := 
&DescribeSecurityGroupRulesOutput{} out.Result = output var buff [1024]byte @@ -33897,7 +33508,7 @@ func (m *awsEc2query_deserializeOpDescribeStoreImageTasks) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeStoreImageTasksOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupRulesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -33911,7 +33522,7 @@ func (m *awsEc2query_deserializeOpDescribeStoreImageTasks) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeStoreImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSecurityGroupRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -33944,14 +33555,14 @@ func awsEc2query_deserializeOpErrorDescribeStoreImageTasks(response *smithyhttp. } } -type awsEc2query_deserializeOpDescribeSubnets struct { +type awsEc2query_deserializeOpDescribeSecurityGroups struct { } -func (*awsEc2query_deserializeOpDescribeSubnets) ID() string { +func (*awsEc2query_deserializeOpDescribeSecurityGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeSubnets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSecurityGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -33969,9 +33580,9 @@ func (m *awsEc2query_deserializeOpDescribeSubnets) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeSubnets(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroups(response, &metadata) } - output := &DescribeSubnetsOutput{} + output := &DescribeSecurityGroupsOutput{} out.Result = output var buff [1024]byte @@ -33992,7 +33603,7 @@ func (m *awsEc2query_deserializeOpDescribeSubnets) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeSubnetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34006,7 +33617,7 @@ func (m *awsEc2query_deserializeOpDescribeSubnets) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeSubnets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSecurityGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34039,14 +33650,14 @@ func awsEc2query_deserializeOpErrorDescribeSubnets(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDescribeTags 
struct { +type awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations struct { } -func (*awsEc2query_deserializeOpDescribeTags) ID() string { +func (*awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSecurityGroupVpcAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34064,9 +33675,9 @@ func (m *awsEc2query_deserializeOpDescribeTags) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTags(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSecurityGroupVpcAssociations(response, &metadata) } - output := &DescribeTagsOutput{} + output := &DescribeSecurityGroupVpcAssociationsOutput{} out.Result = output var buff [1024]byte @@ -34087,7 +33698,7 @@ func (m *awsEc2query_deserializeOpDescribeTags) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTagsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSecurityGroupVpcAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34101,7 +33712,7 @@ func (m *awsEc2query_deserializeOpDescribeTags) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSecurityGroupVpcAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34134,14 +33745,14 @@ func awsEc2query_deserializeOpErrorDescribeTags(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules struct { +type awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces struct { } -func (*awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) ID() string { +func (*awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeServiceLinkVirtualInterfaces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34159,9 +33770,9 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilterRules(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorDescribeServiceLinkVirtualInterfaces(response, &metadata) } - output := &DescribeTrafficMirrorFilterRulesOutput{} + output := &DescribeServiceLinkVirtualInterfacesOutput{} out.Result = output var buff [1024]byte @@ -34182,7 +33793,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorFilterRulesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeServiceLinkVirtualInterfacesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34196,7 +33807,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilterRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeServiceLinkVirtualInterfaces(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34229,14 +33840,14 @@ func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilterRules(response *sm } } -type awsEc2query_deserializeOpDescribeTrafficMirrorFilters struct { +type awsEc2query_deserializeOpDescribeSnapshotAttribute struct { } -func (*awsEc2query_deserializeOpDescribeTrafficMirrorFilters) ID() string { +func (*awsEc2query_deserializeOpDescribeSnapshotAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34254,9 +33865,9 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilters) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilters(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshotAttribute(response, &metadata) } - output := &DescribeTrafficMirrorFiltersOutput{} + output := &DescribeSnapshotAttributeOutput{} out.Result = output var buff [1024]byte @@ -34277,7 +33888,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilters) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorFiltersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSnapshotAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34291,7 +33902,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilters) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilters(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if 
_, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34324,14 +33935,14 @@ func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilters(response *smithy } } -type awsEc2query_deserializeOpDescribeTrafficMirrorSessions struct { +type awsEc2query_deserializeOpDescribeSnapshots struct { } -func (*awsEc2query_deserializeOpDescribeTrafficMirrorSessions) ID() string { +func (*awsEc2query_deserializeOpDescribeSnapshots) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTrafficMirrorSessions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34349,9 +33960,9 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorSessions) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorSessions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshots(response, &metadata) } - output := &DescribeTrafficMirrorSessionsOutput{} + output := &DescribeSnapshotsOutput{} out.Result = output var buff [1024]byte @@ -34372,7 +33983,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorSessions) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorSessionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSnapshotsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34386,7 +33997,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorSessions) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTrafficMirrorSessions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34419,14 +34030,14 @@ func awsEc2query_deserializeOpErrorDescribeTrafficMirrorSessions(response *smith } } -type awsEc2query_deserializeOpDescribeTrafficMirrorTargets struct { +type awsEc2query_deserializeOpDescribeSnapshotTierStatus struct { } -func (*awsEc2query_deserializeOpDescribeTrafficMirrorTargets) ID() string { +func (*awsEc2query_deserializeOpDescribeSnapshotTierStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTrafficMirrorTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSnapshotTierStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34444,9 +34055,9 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorTargets) 
HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorTargets(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSnapshotTierStatus(response, &metadata) } - output := &DescribeTrafficMirrorTargetsOutput{} + output := &DescribeSnapshotTierStatusOutput{} out.Result = output var buff [1024]byte @@ -34467,7 +34078,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorTargets) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorTargetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSnapshotTierStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34481,7 +34092,7 @@ func (m *awsEc2query_deserializeOpDescribeTrafficMirrorTargets) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTrafficMirrorTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSnapshotTierStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34514,14 +34125,14 @@ func awsEc2query_deserializeOpErrorDescribeTrafficMirrorTargets(response *smithy } } -type awsEc2query_deserializeOpDescribeTransitGatewayAttachments struct { +type awsEc2query_deserializeOpDescribeSpotDatafeedSubscription struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayAttachments) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotDatafeedSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34539,9 +34150,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayAttachments) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayAttachments(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotDatafeedSubscription(response, &metadata) } - output := &DescribeTransitGatewayAttachmentsOutput{} + output := &DescribeSpotDatafeedSubscriptionOutput{} out.Result = output var buff [1024]byte @@ -34562,7 +34173,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayAttachments) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayAttachmentsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotDatafeedSubscriptionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34576,7 +34187,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayAttachments) HandleDeser return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDescribeTransitGatewayAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotDatafeedSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34609,14 +34220,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayAttachments(response *s } } -type awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers struct { +type awsEc2query_deserializeOpDescribeSpotFleetInstances struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotFleetInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotFleetInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34634,9 +34245,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayConnectPeers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetInstances(response, &metadata) } - output := &DescribeTransitGatewayConnectPeersOutput{} + output := &DescribeSpotFleetInstancesOutput{} out.Result = output var buff [1024]byte @@ -34657,7 +34268,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayConnectPeersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotFleetInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34671,7 +34282,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnectPeers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotFleetInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34704,14 +34315,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnectPeers(response * } } -type awsEc2query_deserializeOpDescribeTransitGatewayConnects struct { +type awsEc2query_deserializeOpDescribeSpotFleetRequestHistory struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayConnects) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotFleetRequestHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34729,9 +34340,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnects) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayConnects(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetRequestHistory(response, &metadata) } - output := &DescribeTransitGatewayConnectsOutput{} + output := &DescribeSpotFleetRequestHistoryOutput{} out.Result = output var buff [1024]byte @@ -34752,7 +34363,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnects) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayConnectsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotFleetRequestHistoryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34766,7 +34377,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnects) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnects(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotFleetRequestHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34799,14 +34410,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnects(response *smit } } -type awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains struct { +type awsEc2query_deserializeOpDescribeSpotFleetRequests struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotFleetRequests) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotFleetRequests) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34824,9 +34435,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) Handle } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayMulticastDomains(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotFleetRequests(response, &metadata) } - output := &DescribeTransitGatewayMulticastDomainsOutput{} + output := &DescribeSpotFleetRequestsOutput{} out.Result = output var buff [1024]byte @@ -34847,7 +34458,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) Handle } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDescribeTransitGatewayMulticastDomainsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotFleetRequestsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34861,7 +34472,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) Handle return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayMulticastDomains(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotFleetRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34894,14 +34505,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayMulticastDomains(respon } } -type awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments struct { +type awsEc2query_deserializeOpDescribeSpotInstanceRequests struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotInstanceRequests) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotInstanceRequests) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -34919,9 +34530,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayPeeringAttachments(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotInstanceRequests(response, &metadata) } - output := &DescribeTransitGatewayPeeringAttachmentsOutput{} + output := &DescribeSpotInstanceRequestsOutput{} out.Result = output var buff [1024]byte @@ -34942,7 +34553,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayPeeringAttachmentsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotInstanceRequestsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -34956,7 +34567,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayPeeringAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotInstanceRequests(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -34989,14 +34600,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayPeeringAttachments(resp } } -type 
awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables struct { +type awsEc2query_deserializeOpDescribeSpotPriceHistory struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) ID() string { +func (*awsEc2query_deserializeOpDescribeSpotPriceHistory) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSpotPriceHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35014,9 +34625,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayPolicyTables(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSpotPriceHistory(response, &metadata) } - output := &DescribeTransitGatewayPolicyTablesOutput{} + output := &DescribeSpotPriceHistoryOutput{} out.Result = output var buff [1024]byte @@ -35037,7 +34648,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayPolicyTablesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSpotPriceHistoryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35051,7 +34662,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayPolicyTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSpotPriceHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35084,14 +34695,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayPolicyTables(response * } } -type awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements struct { +type awsEc2query_deserializeOpDescribeStaleSecurityGroups struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) ID() string { +func (*awsEc2query_deserializeOpDescribeStaleSecurityGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeStaleSecurityGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35109,9 +34720,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTableAnnouncements(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeStaleSecurityGroups(response, &metadata) } - output := &DescribeTransitGatewayRouteTableAnnouncementsOutput{} + output := &DescribeStaleSecurityGroupsOutput{} out.Result = output var buff [1024]byte @@ -35132,7 +34743,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayRouteTableAnnouncementsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeStaleSecurityGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35146,7 +34757,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTableAnnouncements(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeStaleSecurityGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35179,14 +34790,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTableAnnouncements } } -type awsEc2query_deserializeOpDescribeTransitGatewayRouteTables struct { +type awsEc2query_deserializeOpDescribeStoreImageTasks struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) ID() string { +func (*awsEc2query_deserializeOpDescribeStoreImageTasks) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeStoreImageTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35204,9 +34815,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTables(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeStoreImageTasks(response, &metadata) } - output := &DescribeTransitGatewayRouteTablesOutput{} + output := &DescribeStoreImageTasksOutput{} out.Result = output var buff [1024]byte @@ -35227,7 +34838,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayRouteTablesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeStoreImageTasksOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35241,7 +34852,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorDescribeStoreImageTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35274,14 +34885,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTables(response *s } } -type awsEc2query_deserializeOpDescribeTransitGateways struct { +type awsEc2query_deserializeOpDescribeSubnets struct { } -func (*awsEc2query_deserializeOpDescribeTransitGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeSubnets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeSubnets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35299,9 +34910,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGateways) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeSubnets(response, &metadata) } - output := &DescribeTransitGatewaysOutput{} + output := &DescribeSubnetsOutput{} out.Result = output var buff [1024]byte @@ -35322,7 +34933,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGateways) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeSubnetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35336,7 +34947,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGateways) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeSubnets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35369,14 +34980,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGateways(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments struct { +type awsEc2query_deserializeOpDescribeTags struct { } -func (*awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) ID() string { +func (*awsEc2query_deserializeOpDescribeTags) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35394,9 +35005,9 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayVpcAttachments(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTags(response, &metadata) } - output := &DescribeTransitGatewayVpcAttachmentsOutput{} + output := &DescribeTagsOutput{} out.Result = output var buff [1024]byte @@ -35417,7 +35028,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayVpcAttachmentsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTagsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35431,7 +35042,7 @@ func (m *awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTransitGatewayVpcAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35464,14 +35075,14 @@ func awsEc2query_deserializeOpErrorDescribeTransitGatewayVpcAttachments(response } } -type awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations struct { +type awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules struct { } -func (*awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilterRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35489,9 +35100,9 @@ func (m *awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeTrunkInterfaceAssociations(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilterRules(response, &metadata) } - output := &DescribeTrunkInterfaceAssociationsOutput{} + output := &DescribeTrafficMirrorFilterRulesOutput{} out.Result = output var buff [1024]byte @@ -35512,7 +35123,7 @@ func (m *awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeTrunkInterfaceAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorFilterRulesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35526,7 +35137,7 @@ func (m *awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeTrunkInterfaceAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilterRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35559,14 +35170,14 @@ func awsEc2query_deserializeOpErrorDescribeTrunkInterfaceAssociations(response * } } -type awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints struct { +type awsEc2query_deserializeOpDescribeTrafficMirrorFilters struct { } -func (*awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) ID() string { +func (*awsEc2query_deserializeOpDescribeTrafficMirrorFilters) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTrafficMirrorFilters) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35584,9 +35195,9 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilters(response, &metadata) } - output := &DescribeVerifiedAccessEndpointsOutput{} + output := &DescribeTrafficMirrorFiltersOutput{} out.Result = output var buff [1024]byte @@ -35607,7 +35218,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorFiltersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35621,7 +35232,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVerifiedAccessEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTrafficMirrorFilters(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35654,14 +35265,14 @@ func awsEc2query_deserializeOpErrorDescribeVerifiedAccessEndpoints(response *smi } } -type awsEc2query_deserializeOpDescribeVerifiedAccessGroups struct { +type awsEc2query_deserializeOpDescribeTrafficMirrorSessions struct { } -func (*awsEc2query_deserializeOpDescribeVerifiedAccessGroups) ID() string { +func (*awsEc2query_deserializeOpDescribeTrafficMirrorSessions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVerifiedAccessGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTrafficMirrorSessions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35679,9 +35290,9 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessGroups) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessGroups(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorSessions(response, &metadata) } - output := &DescribeVerifiedAccessGroupsOutput{} + output := &DescribeTrafficMirrorSessionsOutput{} out.Result = output var buff [1024]byte @@ -35702,7 +35313,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessGroups) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessGroupsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorSessionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35716,7 +35327,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessGroups) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVerifiedAccessGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTrafficMirrorSessions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35749,14 +35360,14 @@ func awsEc2query_deserializeOpErrorDescribeVerifiedAccessGroups(response *smithy } } -type awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations struct { +type awsEc2query_deserializeOpDescribeTrafficMirrorTargets struct { } -func (*awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) ID() string { +func (*awsEc2query_deserializeOpDescribeTrafficMirrorTargets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTrafficMirrorTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35774,9 +35385,9 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigura } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstanceLoggingConfigurations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTrafficMirrorTargets(response, &metadata) } - output := &DescribeVerifiedAccessInstanceLoggingConfigurationsOutput{} + output := &DescribeTrafficMirrorTargetsOutput{} out.Result = output var buff [1024]byte @@ -35797,7 +35408,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigura } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessInstanceLoggingConfigurationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTrafficMirrorTargetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35811,7 +35422,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigura return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstanceLoggingConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTrafficMirrorTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35844,14 +35455,14 @@ func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstanceLoggingConfigur } } -type awsEc2query_deserializeOpDescribeVerifiedAccessInstances struct { +type awsEc2query_deserializeOpDescribeTransitGatewayAttachments struct { } -func (*awsEc2query_deserializeOpDescribeVerifiedAccessInstances) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayAttachments) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35869,9 +35480,9 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstances) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayAttachments(response, &metadata) } - output := &DescribeVerifiedAccessInstancesOutput{} + output := &DescribeTransitGatewayAttachmentsOutput{} out.Result = output var buff [1024]byte @@ -35892,7 +35503,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstances) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayAttachmentsOutput(&output, decoder) if err 
!= nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -35906,7 +35517,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstances) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -35939,14 +35550,14 @@ func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstances(response *smi } } -type awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders struct { +type awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers struct { } -func (*awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnectPeers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -35964,9 +35575,9 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessTrustProviders(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayConnectPeers(response, &metadata) } - output := &DescribeVerifiedAccessTrustProvidersOutput{} + output := &DescribeTransitGatewayConnectPeersOutput{} out.Result = output var buff [1024]byte @@ -35987,7 +35598,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessTrustProvidersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayConnectPeersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36001,7 +35612,7 @@ func (m *awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVerifiedAccessTrustProviders(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnectPeers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36034,14 +35645,14 @@ func awsEc2query_deserializeOpErrorDescribeVerifiedAccessTrustProviders(response } } -type awsEc2query_deserializeOpDescribeVolumeAttribute struct { +type awsEc2query_deserializeOpDescribeTransitGatewayConnects struct { } -func (*awsEc2query_deserializeOpDescribeVolumeAttribute) ID() string { +func 
(*awsEc2query_deserializeOpDescribeTransitGatewayConnects) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVolumeAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayConnects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36059,9 +35670,9 @@ func (m *awsEc2query_deserializeOpDescribeVolumeAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumeAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayConnects(response, &metadata) } - output := &DescribeVolumeAttributeOutput{} + output := &DescribeTransitGatewayConnectsOutput{} out.Result = output var buff [1024]byte @@ -36082,7 +35693,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumeAttribute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVolumeAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayConnectsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36096,7 +35707,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumeAttribute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVolumeAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayConnects(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36129,14 +35740,14 @@ func awsEc2query_deserializeOpErrorDescribeVolumeAttribute(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDescribeVolumes struct { +type awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains struct { } -func (*awsEc2query_deserializeOpDescribeVolumes) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVolumes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayMulticastDomains) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36154,9 +35765,9 @@ func (m *awsEc2query_deserializeOpDescribeVolumes) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayMulticastDomains(response, &metadata) } - output := &DescribeVolumesOutput{} + output := &DescribeTransitGatewayMulticastDomainsOutput{} out.Result = output var buff [1024]byte @@ -36177,7 +35788,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumes) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVolumesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayMulticastDomainsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36191,7 +35802,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumes) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVolumes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayMulticastDomains(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36224,14 +35835,14 @@ func awsEc2query_deserializeOpErrorDescribeVolumes(response *smithyhttp.Response } } -type awsEc2query_deserializeOpDescribeVolumesModifications struct { +type awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments struct { } -func (*awsEc2query_deserializeOpDescribeVolumesModifications) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVolumesModifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayPeeringAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36249,9 +35860,9 @@ func (m *awsEc2query_deserializeOpDescribeVolumesModifications) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumesModifications(response, &metadata) + 
return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayPeeringAttachments(response, &metadata) } - output := &DescribeVolumesModificationsOutput{} + output := &DescribeTransitGatewayPeeringAttachmentsOutput{} out.Result = output var buff [1024]byte @@ -36272,7 +35883,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumesModifications) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVolumesModificationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayPeeringAttachmentsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36286,7 +35897,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumesModifications) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVolumesModifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayPeeringAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36319,14 +35930,14 @@ func awsEc2query_deserializeOpErrorDescribeVolumesModifications(response *smithy } } -type awsEc2query_deserializeOpDescribeVolumeStatus struct { +type awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables struct { } -func (*awsEc2query_deserializeOpDescribeVolumeStatus) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVolumeStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayPolicyTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36344,9 +35955,9 @@ func (m *awsEc2query_deserializeOpDescribeVolumeStatus) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumeStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayPolicyTables(response, &metadata) } - output := &DescribeVolumeStatusOutput{} + output := &DescribeTransitGatewayPolicyTablesOutput{} out.Result = output var buff [1024]byte @@ -36367,7 +35978,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumeStatus) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVolumeStatusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayPolicyTablesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36381,7 +35992,7 @@ func (m *awsEc2query_deserializeOpDescribeVolumeStatus) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVolumeStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayPolicyTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36414,14 +36025,14 @@ func awsEc2query_deserializeOpErrorDescribeVolumeStatus(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDescribeVpcAttribute struct { +type awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements struct { } -func (*awsEc2query_deserializeOpDescribeVpcAttribute) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTableAnnouncements) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36439,9 +36050,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcAttribute) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTableAnnouncements(response, &metadata) } - output := &DescribeVpcAttributeOutput{} + output := &DescribeTransitGatewayRouteTableAnnouncementsOutput{} out.Result = output var buff [1024]byte @@ -36462,7 +36073,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcAttribute) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayRouteTableAnnouncementsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36476,7 +36087,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcAttribute) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTableAnnouncements(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36509,14 +36120,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcAttribute(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions struct { +type awsEc2query_deserializeOpDescribeTransitGatewayRouteTables struct { } -func (*awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayRouteTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36534,9 +36145,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) Handle } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessExclusions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTables(response, &metadata) } - output := &DescribeVpcBlockPublicAccessExclusionsOutput{} + output := &DescribeTransitGatewayRouteTablesOutput{} out.Result = output var buff [1024]byte @@ -36557,7 +36168,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) Handle } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcBlockPublicAccessExclusionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayRouteTablesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36571,7 +36182,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) Handle return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessExclusions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayRouteTables(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36604,14 +36215,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessExclusions(respon } } -type awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions struct { +type awsEc2query_deserializeOpDescribeTransitGateways struct { } -func (*awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36629,9 +36240,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGateways(response, &metadata) } - output := &DescribeVpcBlockPublicAccessOptionsOutput{} + output := &DescribeTransitGatewaysOutput{} out.Result = output var buff [1024]byte @@ -36652,7 +36263,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcBlockPublicAccessOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewaysOutput(&output, decoder) if err != nil { var snapshot 
bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36666,7 +36277,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36699,14 +36310,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessOptions(response } } -type awsEc2query_deserializeOpDescribeVpcClassicLink struct { +type awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments struct { } -func (*awsEc2query_deserializeOpDescribeVpcClassicLink) ID() string { +func (*awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTransitGatewayVpcAttachments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36724,9 +36335,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLink) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcClassicLink(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTransitGatewayVpcAttachments(response, &metadata) } - output := &DescribeVpcClassicLinkOutput{} + output := &DescribeTransitGatewayVpcAttachmentsOutput{} out.Result = output var buff [1024]byte @@ -36747,7 +36358,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLink) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcClassicLinkOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTransitGatewayVpcAttachmentsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36761,7 +36372,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLink) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcClassicLink(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTransitGatewayVpcAttachments(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36794,14 +36405,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcClassicLink(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport struct { +type awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations struct { } -func (*awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) ID() string { return 
"OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeTrunkInterfaceAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36819,9 +36430,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcClassicLinkDnsSupport(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeTrunkInterfaceAssociations(response, &metadata) } - output := &DescribeVpcClassicLinkDnsSupportOutput{} + output := &DescribeTrunkInterfaceAssociationsOutput{} out.Result = output var buff [1024]byte @@ -36842,7 +36453,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcClassicLinkDnsSupportOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeTrunkInterfaceAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36856,7 +36467,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeTrunkInterfaceAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36889,14 +36500,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcClassicLinkDnsSupport(response *sm } } -type awsEc2query_deserializeOpDescribeVpcEndpointAssociations struct { +type awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointAssociations) ID() string { +func (*awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVerifiedAccessEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -36914,9 +36525,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointAssociations) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessEndpoints(response, &metadata) } - output := &DescribeVpcEndpointAssociationsOutput{} + output := &DescribeVerifiedAccessEndpointsOutput{} out.Result = output var buff 
[1024]byte @@ -36937,7 +36548,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointAssociations) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessEndpointsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -36951,7 +36562,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointAssociations) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVerifiedAccessEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36984,14 +36595,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointAssociations(response *smi } } -type awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications struct { +type awsEc2query_deserializeOpDescribeVerifiedAccessGroups struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) ID() string { +func (*awsEc2query_deserializeOpDescribeVerifiedAccessGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVerifiedAccessGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37009,9 +36620,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointConnectionNotifications(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessGroups(response, &metadata) } - output := &DescribeVpcEndpointConnectionNotificationsOutput{} + output := &DescribeVerifiedAccessGroupsOutput{} out.Result = output var buff [1024]byte @@ -37032,7 +36643,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointConnectionNotificationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37046,7 +36657,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnectionNotifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVerifiedAccessGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to 
copy error response body, %w", err)} @@ -37079,14 +36690,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnectionNotifications(re } } -type awsEc2query_deserializeOpDescribeVpcEndpointConnections struct { +type awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointConnections) ID() string { +func (*awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37104,9 +36715,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnections) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstanceLoggingConfigurations(response, &metadata) } - output := &DescribeVpcEndpointConnectionsOutput{} + output := &DescribeVerifiedAccessInstanceLoggingConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -37127,7 +36738,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnections) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointConnectionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessInstanceLoggingConfigurationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37141,7 +36752,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnections) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstanceLoggingConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37174,14 +36785,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnections(response *smit } } -type awsEc2query_deserializeOpDescribeVpcEndpoints struct { +type awsEc2query_deserializeOpDescribeVerifiedAccessInstances struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpoints) ID() string { +func (*awsEc2query_deserializeOpDescribeVerifiedAccessInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVerifiedAccessInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -37199,9 +36810,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpoints) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpoints(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstances(response, &metadata) } - output := &DescribeVpcEndpointsOutput{} + output := &DescribeVerifiedAccessInstancesOutput{} out.Result = output var buff [1024]byte @@ -37222,7 +36833,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpoints) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37236,7 +36847,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpoints) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVerifiedAccessInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37269,14 +36880,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpoints(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations struct { +type awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) ID() string { +func (*awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVerifiedAccessTrustProviders) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37294,9 +36905,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServiceConfigurations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVerifiedAccessTrustProviders(response, &metadata) } - output := &DescribeVpcEndpointServiceConfigurationsOutput{} + output := &DescribeVerifiedAccessTrustProvidersOutput{} out.Result = output var buff [1024]byte @@ -37317,7 +36928,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServiceConfigurationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVerifiedAccessTrustProvidersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37331,7 +36942,7 @@ func 
(m *awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointServiceConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVerifiedAccessTrustProviders(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37364,14 +36975,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointServiceConfigurations(resp } } -type awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions struct { +type awsEc2query_deserializeOpDescribeVolumeAttribute struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) ID() string { +func (*awsEc2query_deserializeOpDescribeVolumeAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVolumeAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37389,9 +37000,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServicePermissions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumeAttribute(response, &metadata) } - output := &DescribeVpcEndpointServicePermissionsOutput{} + output := &DescribeVolumeAttributeOutput{} out.Result = output var buff [1024]byte @@ -37412,7 +37023,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServicePermissionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVolumeAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37426,7 +37037,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointServicePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVolumeAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37459,14 +37070,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointServicePermissions(respons } } -type awsEc2query_deserializeOpDescribeVpcEndpointServices struct { +type awsEc2query_deserializeOpDescribeVolumes struct { } -func (*awsEc2query_deserializeOpDescribeVpcEndpointServices) ID() string { +func (*awsEc2query_deserializeOpDescribeVolumes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcEndpointServices) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVolumes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37484,9 +37095,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServices) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServices(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumes(response, &metadata) } - output := &DescribeVpcEndpointServicesOutput{} + output := &DescribeVolumesOutput{} out.Result = output var buff [1024]byte @@ -37507,7 +37118,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServices) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServicesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVolumesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37521,7 +37132,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcEndpointServices) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcEndpointServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVolumes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37554,14 +37165,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcEndpointServices(response *smithyh } } -type awsEc2query_deserializeOpDescribeVpcPeeringConnections struct { +type awsEc2query_deserializeOpDescribeVolumesModifications struct { } -func (*awsEc2query_deserializeOpDescribeVpcPeeringConnections) ID() string { +func (*awsEc2query_deserializeOpDescribeVolumesModifications) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcPeeringConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVolumesModifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37579,9 +37190,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcPeeringConnections) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcPeeringConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumesModifications(response, &metadata) } - output := &DescribeVpcPeeringConnectionsOutput{} + output := &DescribeVolumesModificationsOutput{} out.Result = output var buff [1024]byte @@ -37602,7 +37213,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcPeeringConnections) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDescribeVpcPeeringConnectionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVolumesModificationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37616,7 +37227,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcPeeringConnections) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcPeeringConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVolumesModifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37649,14 +37260,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcPeeringConnections(response *smith } } -type awsEc2query_deserializeOpDescribeVpcs struct { +type awsEc2query_deserializeOpDescribeVolumeStatus struct { } -func (*awsEc2query_deserializeOpDescribeVpcs) ID() string { +func (*awsEc2query_deserializeOpDescribeVolumeStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpcs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVolumeStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37674,9 +37285,9 @@ func (m *awsEc2query_deserializeOpDescribeVpcs) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVolumeStatus(response, &metadata) } - output := &DescribeVpcsOutput{} + output := &DescribeVolumeStatusOutput{} out.Result = output var buff [1024]byte @@ -37697,7 +37308,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcs) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpcsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVolumeStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37711,7 +37322,7 @@ func (m *awsEc2query_deserializeOpDescribeVpcs) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpcs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVolumeStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37744,14 +37355,14 @@ func awsEc2query_deserializeOpErrorDescribeVpcs(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDescribeVpnConnections struct { +type awsEc2query_deserializeOpDescribeVpcAttribute struct { } -func (*awsEc2query_deserializeOpDescribeVpnConnections) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcAttribute) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDescribeVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37769,9 +37380,9 @@ func (m *awsEc2query_deserializeOpDescribeVpnConnections) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpnConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcAttribute(response, &metadata) } - output := &DescribeVpnConnectionsOutput{} + output := &DescribeVpcAttributeOutput{} out.Result = output var buff [1024]byte @@ -37792,7 +37403,7 @@ func (m *awsEc2query_deserializeOpDescribeVpnConnections) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDescribeVpnConnectionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37806,7 +37417,7 @@ func (m *awsEc2query_deserializeOpDescribeVpnConnections) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37839,14 +37450,14 @@ func awsEc2query_deserializeOpErrorDescribeVpnConnections(response *smithyhttp.R } } -type awsEc2query_deserializeOpDescribeVpnGateways struct { +type awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions struct { } -func (*awsEc2query_deserializeOpDescribeVpnGateways) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDescribeVpnGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessExclusions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37864,9 +37475,9 @@ func (m *awsEc2query_deserializeOpDescribeVpnGateways) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDescribeVpnGateways(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessExclusions(response, &metadata) } - output := &DescribeVpnGatewaysOutput{} + output := &DescribeVpcBlockPublicAccessExclusionsOutput{} out.Result = output var buff [1024]byte @@ -37887,7 +37498,7 @@ func (m *awsEc2query_deserializeOpDescribeVpnGateways) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDescribeVpnGatewaysOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcBlockPublicAccessExclusionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37901,7 +37512,7 @@ func (m *awsEc2query_deserializeOpDescribeVpnGateways) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorDescribeVpnGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessExclusions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -37934,14 +37545,14 @@ func awsEc2query_deserializeOpErrorDescribeVpnGateways(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDetachClassicLinkVpc struct { +type awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions struct { } -func (*awsEc2query_deserializeOpDetachClassicLinkVpc) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachClassicLinkVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcBlockPublicAccessOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -37959,9 +37570,9 @@ func (m *awsEc2query_deserializeOpDetachClassicLinkVpc) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachClassicLinkVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessOptions(response, &metadata) } - output := &DetachClassicLinkVpcOutput{} + output := &DescribeVpcBlockPublicAccessOptionsOutput{} out.Result = output var buff [1024]byte @@ -37982,7 +37593,7 @@ func (m *awsEc2query_deserializeOpDetachClassicLinkVpc) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDetachClassicLinkVpcOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcBlockPublicAccessOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -37996,7 +37607,7 @@ func (m *awsEc2query_deserializeOpDetachClassicLinkVpc) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachClassicLinkVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcBlockPublicAccessOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38029,14 +37640,14 @@ func awsEc2query_deserializeOpErrorDetachClassicLinkVpc(response *smithyhttp.Res } } -type awsEc2query_deserializeOpDetachInternetGateway struct { +type awsEc2query_deserializeOpDescribeVpcClassicLink struct { } -func 
(*awsEc2query_deserializeOpDetachInternetGateway) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcClassicLink) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38054,21 +37665,44 @@ func (m *awsEc2query_deserializeOpDetachInternetGateway) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachInternetGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcClassicLink(response, &metadata) } - output := &DetachInternetGatewayOutput{} + output := &DescribeVpcClassicLinkOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDescribeVpcClassicLinkOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcClassicLink(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38101,14 +37735,14 @@ func awsEc2query_deserializeOpErrorDetachInternetGateway(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDetachNetworkInterface struct { +type awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_deserializeOpDetachNetworkInterface) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachNetworkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38126,21 +37760,44 @@ func (m 
*awsEc2query_deserializeOpDetachNetworkInterface) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachNetworkInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcClassicLinkDnsSupport(response, &metadata) } - output := &DetachNetworkInterfaceOutput{} + output := &DescribeVpcClassicLinkDnsSupportOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDescribeVpcClassicLinkDnsSupportOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38173,14 +37830,14 @@ func awsEc2query_deserializeOpErrorDetachNetworkInterface(response *smithyhttp.R } } -type awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider struct { +type awsEc2query_deserializeOpDescribeVpcEndpointAssociations struct { } -func (*awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38198,9 +37855,9 @@ func (m *awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachVerifiedAccessTrustProvider(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointAssociations(response, &metadata) } - output := &DetachVerifiedAccessTrustProviderOutput{} + output := &DescribeVpcEndpointAssociationsOutput{} out.Result = output var buff [1024]byte @@ -38221,7 +37878,7 @@ func (m *awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) HandleDeser } 
decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDetachVerifiedAccessTrustProviderOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38235,7 +37892,7 @@ func (m *awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38268,14 +37925,14 @@ func awsEc2query_deserializeOpErrorDetachVerifiedAccessTrustProvider(response *s } } -type awsEc2query_deserializeOpDetachVolume struct { +type awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications struct { } -func (*awsEc2query_deserializeOpDetachVolume) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnectionNotifications) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38293,9 +37950,9 @@ func (m *awsEc2query_deserializeOpDetachVolume) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachVolume(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointConnectionNotifications(response, &metadata) } - output := &DetachVolumeOutput{} + output := &DescribeVpcEndpointConnectionNotificationsOutput{} out.Result = output var buff [1024]byte @@ -38316,7 +37973,7 @@ func (m *awsEc2query_deserializeOpDetachVolume) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDetachVolumeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointConnectionNotificationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38330,7 +37987,7 @@ func (m *awsEc2query_deserializeOpDetachVolume) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnectionNotifications(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38363,14 +38020,14 @@ func awsEc2query_deserializeOpErrorDetachVolume(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDetachVpnGateway struct { +type 
awsEc2query_deserializeOpDescribeVpcEndpointConnections struct { } -func (*awsEc2query_deserializeOpDetachVpnGateway) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointConnections) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDetachVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38388,21 +38045,44 @@ func (m *awsEc2query_deserializeOpDetachVpnGateway) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDetachVpnGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointConnections(response, &metadata) } - output := &DetachVpnGatewayOutput{} + output := &DescribeVpcEndpointConnectionsOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointConnectionsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDetachVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38435,14 +38115,14 @@ func awsEc2query_deserializeOpErrorDetachVpnGateway(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpDisableAddressTransfer struct { +type awsEc2query_deserializeOpDescribeVpcEndpoints struct { } -func (*awsEc2query_deserializeOpDisableAddressTransfer) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableAddressTransfer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -38460,9 +38140,9 @@ func (m *awsEc2query_deserializeOpDisableAddressTransfer) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableAddressTransfer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpoints(response, &metadata) } - output := &DisableAddressTransferOutput{} + output := &DescribeVpcEndpointsOutput{} out.Result = output var buff [1024]byte @@ -38483,7 +38163,7 @@ func (m *awsEc2query_deserializeOpDisableAddressTransfer) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableAddressTransferOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38497,7 +38177,7 @@ func (m *awsEc2query_deserializeOpDisableAddressTransfer) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableAddressTransfer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38530,14 +38210,14 @@ func awsEc2query_deserializeOpErrorDisableAddressTransfer(response *smithyhttp.R } } -type awsEc2query_deserializeOpDisableAllowedImagesSettings struct { +type awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations struct { } -func (*awsEc2query_deserializeOpDisableAllowedImagesSettings) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointServiceConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38555,9 +38235,9 @@ func (m *awsEc2query_deserializeOpDisableAllowedImagesSettings) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableAllowedImagesSettings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServiceConfigurations(response, &metadata) } - output := &DisableAllowedImagesSettingsOutput{} + output := &DescribeVpcEndpointServiceConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -38578,7 +38258,7 @@ func (m *awsEc2query_deserializeOpDisableAllowedImagesSettings) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableAllowedImagesSettingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServiceConfigurationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38592,7 +38272,7 @@ func (m *awsEc2query_deserializeOpDisableAllowedImagesSettings) HandleDeserializ return out, 
metadata, err } -func awsEc2query_deserializeOpErrorDisableAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointServiceConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38625,14 +38305,14 @@ func awsEc2query_deserializeOpErrorDisableAllowedImagesSettings(response *smithy } } -type awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription struct { +type awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions struct { } -func (*awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointServicePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38650,9 +38330,9 @@ func (m *awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableAwsNetworkPerformanceMetricSubscription(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServicePermissions(response, &metadata) } - output := &DisableAwsNetworkPerformanceMetricSubscriptionOutput{} + output := &DescribeVpcEndpointServicePermissionsOutput{} out.Result = output var buff [1024]byte @@ -38673,7 +38353,7 @@ func (m *awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableAwsNetworkPerformanceMetricSubscriptionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServicePermissionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38687,7 +38367,7 @@ func (m *awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableAwsNetworkPerformanceMetricSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointServicePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38720,14 +38400,14 @@ func awsEc2query_deserializeOpErrorDisableAwsNetworkPerformanceMetricSubscriptio } } -type awsEc2query_deserializeOpDisableEbsEncryptionByDefault struct { +type awsEc2query_deserializeOpDescribeVpcEndpointServices struct { } -func (*awsEc2query_deserializeOpDisableEbsEncryptionByDefault) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcEndpointServices) ID() string { return 
"OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcEndpointServices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38745,9 +38425,9 @@ func (m *awsEc2query_deserializeOpDisableEbsEncryptionByDefault) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableEbsEncryptionByDefault(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcEndpointServices(response, &metadata) } - output := &DisableEbsEncryptionByDefaultOutput{} + output := &DescribeVpcEndpointServicesOutput{} out.Result = output var buff [1024]byte @@ -38768,7 +38448,7 @@ func (m *awsEc2query_deserializeOpDisableEbsEncryptionByDefault) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableEbsEncryptionByDefaultOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcEndpointServicesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38782,7 +38462,7 @@ func (m *awsEc2query_deserializeOpDisableEbsEncryptionByDefault) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcEndpointServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38815,14 +38495,14 @@ func awsEc2query_deserializeOpErrorDisableEbsEncryptionByDefault(response *smith } } -type awsEc2query_deserializeOpDisableFastLaunch struct { +type awsEc2query_deserializeOpDescribeVpcPeeringConnections struct { } -func (*awsEc2query_deserializeOpDisableFastLaunch) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcPeeringConnections) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableFastLaunch) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcPeeringConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38840,9 +38520,9 @@ func (m *awsEc2query_deserializeOpDisableFastLaunch) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableFastLaunch(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcPeeringConnections(response, &metadata) } - output := &DisableFastLaunchOutput{} + output := &DescribeVpcPeeringConnectionsOutput{} out.Result = output var buff [1024]byte @@ -38863,7 +38543,7 @@ func (m *awsEc2query_deserializeOpDisableFastLaunch) HandleDeserialize(ctx conte } decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableFastLaunchOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcPeeringConnectionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38877,7 +38557,7 @@ func (m *awsEc2query_deserializeOpDisableFastLaunch) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableFastLaunch(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcPeeringConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -38910,14 +38590,14 @@ func awsEc2query_deserializeOpErrorDisableFastLaunch(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpDisableFastSnapshotRestores struct { +type awsEc2query_deserializeOpDescribeVpcs struct { } -func (*awsEc2query_deserializeOpDisableFastSnapshotRestores) ID() string { +func (*awsEc2query_deserializeOpDescribeVpcs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpcs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -38935,9 +38615,9 @@ func (m *awsEc2query_deserializeOpDisableFastSnapshotRestores) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableFastSnapshotRestores(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpcs(response, &metadata) } - output := &DisableFastSnapshotRestoresOutput{} + output := &DescribeVpcsOutput{} out.Result = output var buff [1024]byte @@ -38958,7 +38638,7 @@ func (m *awsEc2query_deserializeOpDisableFastSnapshotRestores) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableFastSnapshotRestoresOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpcsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -38972,7 +38652,7 @@ func (m *awsEc2query_deserializeOpDisableFastSnapshotRestores) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpcs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39005,14 +38685,14 @@ func awsEc2query_deserializeOpErrorDisableFastSnapshotRestores(response *smithyh } } -type awsEc2query_deserializeOpDisableImage struct { +type awsEc2query_deserializeOpDescribeVpnConnections struct { } -func (*awsEc2query_deserializeOpDisableImage) ID() string { +func 
(*awsEc2query_deserializeOpDescribeVpnConnections) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39030,9 +38710,9 @@ func (m *awsEc2query_deserializeOpDisableImage) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpnConnections(response, &metadata) } - output := &DisableImageOutput{} + output := &DescribeVpnConnectionsOutput{} out.Result = output var buff [1024]byte @@ -39053,7 +38733,7 @@ func (m *awsEc2query_deserializeOpDisableImage) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpnConnectionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39067,7 +38747,7 @@ func (m *awsEc2query_deserializeOpDisableImage) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39100,14 +38780,14 @@ func awsEc2query_deserializeOpErrorDisableImage(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpDisableImageBlockPublicAccess struct { +type awsEc2query_deserializeOpDescribeVpnGateways struct { } -func (*awsEc2query_deserializeOpDisableImageBlockPublicAccess) ID() string { +func (*awsEc2query_deserializeOpDescribeVpnGateways) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableImageBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDescribeVpnGateways) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39125,9 +38805,9 @@ func (m *awsEc2query_deserializeOpDisableImageBlockPublicAccess) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableImageBlockPublicAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDescribeVpnGateways(response, &metadata) } - output := &DisableImageBlockPublicAccessOutput{} + output := &DescribeVpnGatewaysOutput{} out.Result = output var buff [1024]byte @@ -39148,7 +38828,7 @@ func (m *awsEc2query_deserializeOpDisableImageBlockPublicAccess) HandleDeseriali } decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableImageBlockPublicAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDescribeVpnGatewaysOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39162,7 +38842,7 @@ func (m *awsEc2query_deserializeOpDisableImageBlockPublicAccess) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableImageBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDescribeVpnGateways(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39195,14 +38875,14 @@ func awsEc2query_deserializeOpErrorDisableImageBlockPublicAccess(response *smith } } -type awsEc2query_deserializeOpDisableImageDeprecation struct { +type awsEc2query_deserializeOpDetachClassicLinkVpc struct { } -func (*awsEc2query_deserializeOpDisableImageDeprecation) ID() string { +func (*awsEc2query_deserializeOpDetachClassicLinkVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableImageDeprecation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDetachClassicLinkVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39220,9 +38900,9 @@ func (m *awsEc2query_deserializeOpDisableImageDeprecation) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableImageDeprecation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDetachClassicLinkVpc(response, &metadata) } - output := &DisableImageDeprecationOutput{} + output := &DetachClassicLinkVpcOutput{} out.Result = output var buff [1024]byte @@ -39243,7 +38923,7 @@ func (m *awsEc2query_deserializeOpDisableImageDeprecation) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableImageDeprecationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDetachClassicLinkVpcOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39257,7 +38937,7 @@ func (m *awsEc2query_deserializeOpDisableImageDeprecation) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableImageDeprecation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDetachClassicLinkVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39290,14 +38970,14 @@ func awsEc2query_deserializeOpErrorDisableImageDeprecation(response *smithyhttp. 
} } -type awsEc2query_deserializeOpDisableImageDeregistrationProtection struct { +type awsEc2query_deserializeOpDetachInternetGateway struct { } -func (*awsEc2query_deserializeOpDisableImageDeregistrationProtection) ID() string { +func (*awsEc2query_deserializeOpDetachInternetGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableImageDeregistrationProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDetachInternetGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39315,44 +38995,93 @@ func (m *awsEc2query_deserializeOpDisableImageDeregistrationProtection) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableImageDeregistrationProtection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDetachInternetGateway(response, &metadata) } - output := &DisableImageDeregistrationProtectionOutput{} + output := &DetachInternetGatewayOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } } - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableImageDeregistrationProtectionOutput(&output, decoder) + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorDetachInternetGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } + return genericError + + } +} + +type awsEc2query_deserializeOpDetachNetworkInterface struct { +} + +func (*awsEc2query_deserializeOpDetachNetworkInterface) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpDetachNetworkInterface) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorDetachNetworkInterface(response, &metadata) + } + output := &DetachNetworkInterfaceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableImageDeregistrationProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDetachNetworkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39385,14 +39114,14 @@ func awsEc2query_deserializeOpErrorDisableImageDeregistrationProtection(response } } -type awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount struct { +type awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider struct { } -func (*awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) ID() string { +func (*awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDetachVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39410,9 +39139,9 @@ func (m *awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableIpamOrganizationAdminAccount(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDetachVerifiedAccessTrustProvider(response, &metadata) } - output := &DisableIpamOrganizationAdminAccountOutput{} + output := &DetachVerifiedAccessTrustProviderOutput{} out.Result = output var buff [1024]byte @@ -39433,7 +39162,7 @@ func (m *awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableIpamOrganizationAdminAccountOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDetachVerifiedAccessTrustProviderOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39447,7 +39176,7 @@ func (m 
*awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableIpamOrganizationAdminAccount(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDetachVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39480,14 +39209,14 @@ func awsEc2query_deserializeOpErrorDisableIpamOrganizationAdminAccount(response } } -type awsEc2query_deserializeOpDisableRouteServerPropagation struct { +type awsEc2query_deserializeOpDetachVolume struct { } -func (*awsEc2query_deserializeOpDisableRouteServerPropagation) ID() string { +func (*awsEc2query_deserializeOpDetachVolume) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableRouteServerPropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDetachVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39505,9 +39234,9 @@ func (m *awsEc2query_deserializeOpDisableRouteServerPropagation) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableRouteServerPropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDetachVolume(response, &metadata) } - output := &DisableRouteServerPropagationOutput{} + output := &DetachVolumeOutput{} out.Result = output var buff [1024]byte @@ -39528,7 +39257,7 @@ func (m *awsEc2query_deserializeOpDisableRouteServerPropagation) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableRouteServerPropagationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDetachVolumeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39542,7 +39271,7 @@ func (m *awsEc2query_deserializeOpDisableRouteServerPropagation) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableRouteServerPropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDetachVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39575,14 +39304,14 @@ func awsEc2query_deserializeOpErrorDisableRouteServerPropagation(response *smith } } -type awsEc2query_deserializeOpDisableSerialConsoleAccess struct { +type awsEc2query_deserializeOpDetachVpnGateway struct { } -func (*awsEc2query_deserializeOpDisableSerialConsoleAccess) ID() string { +func (*awsEc2query_deserializeOpDetachVpnGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableSerialConsoleAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpDetachVpnGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39600,9 +39329,81 @@ func (m *awsEc2query_deserializeOpDisableSerialConsoleAccess) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableSerialConsoleAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDetachVpnGateway(response, &metadata) } - output := &DisableSerialConsoleAccessOutput{} + output := &DetachVpnGatewayOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorDetachVpnGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpDisableAddressTransfer struct { +} + +func (*awsEc2query_deserializeOpDisableAddressTransfer) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpDisableAddressTransfer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorDisableAddressTransfer(response, &metadata) + } + output := &DisableAddressTransferOutput{} out.Result = output var buff [1024]byte @@ -39623,7 +39424,7 @@ func (m *awsEc2query_deserializeOpDisableSerialConsoleAccess) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableSerialConsoleAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableAddressTransferOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39637,7 +39438,7 @@ func (m 
*awsEc2query_deserializeOpDisableSerialConsoleAccess) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableSerialConsoleAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableAddressTransfer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39670,14 +39471,14 @@ func awsEc2query_deserializeOpErrorDisableSerialConsoleAccess(response *smithyht } } -type awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess struct { +type awsEc2query_deserializeOpDisableAllowedImagesSettings struct { } -func (*awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) ID() string { +func (*awsEc2query_deserializeOpDisableAllowedImagesSettings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39695,9 +39496,9 @@ func (m *awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableSnapshotBlockPublicAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableAllowedImagesSettings(response, &metadata) } - output := &DisableSnapshotBlockPublicAccessOutput{} + output := &DisableAllowedImagesSettingsOutput{} out.Result = output var buff [1024]byte @@ -39718,7 +39519,7 @@ func (m *awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableSnapshotBlockPublicAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableAllowedImagesSettingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39732,7 +39533,7 @@ func (m *awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableSnapshotBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39765,14 +39566,14 @@ func awsEc2query_deserializeOpErrorDisableSnapshotBlockPublicAccess(response *sm } } -type awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation struct { +type awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription struct { } -func (*awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) ID() string { +func (*awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableAwsNetworkPerformanceMetricSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39790,9 +39591,9 @@ func (m *awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableTransitGatewayRouteTablePropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableAwsNetworkPerformanceMetricSubscription(response, &metadata) } - output := &DisableTransitGatewayRouteTablePropagationOutput{} + output := &DisableAwsNetworkPerformanceMetricSubscriptionOutput{} out.Result = output var buff [1024]byte @@ -39813,7 +39614,7 @@ func (m *awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableTransitGatewayRouteTablePropagationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableAwsNetworkPerformanceMetricSubscriptionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39827,7 +39628,7 @@ func (m *awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableTransitGatewayRouteTablePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableAwsNetworkPerformanceMetricSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39860,14 +39661,14 @@ func awsEc2query_deserializeOpErrorDisableTransitGatewayRouteTablePropagation(re } } -type awsEc2query_deserializeOpDisableVgwRoutePropagation struct { +type awsEc2query_deserializeOpDisableEbsEncryptionByDefault struct { } -func (*awsEc2query_deserializeOpDisableVgwRoutePropagation) ID() string { +func (*awsEc2query_deserializeOpDisableEbsEncryptionByDefault) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableVgwRoutePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39885,21 +39686,44 @@ func (m *awsEc2query_deserializeOpDisableVgwRoutePropagation) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableVgwRoutePropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableEbsEncryptionByDefault(response, &metadata) } - output := &DisableVgwRoutePropagationOutput{} + output := 
&DisableEbsEncryptionByDefaultOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDisableEbsEncryptionByDefaultOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableVgwRoutePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -39932,14 +39756,14 @@ func awsEc2query_deserializeOpErrorDisableVgwRoutePropagation(response *smithyht } } -type awsEc2query_deserializeOpDisableVpcClassicLink struct { +type awsEc2query_deserializeOpDisableFastLaunch struct { } -func (*awsEc2query_deserializeOpDisableVpcClassicLink) ID() string { +func (*awsEc2query_deserializeOpDisableFastLaunch) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableFastLaunch) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -39957,9 +39781,9 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLink) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableVpcClassicLink(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableFastLaunch(response, &metadata) } - output := &DisableVpcClassicLinkOutput{} + output := &DisableFastLaunchOutput{} out.Result = output var buff [1024]byte @@ -39980,7 +39804,7 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLink) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableVpcClassicLinkOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableFastLaunchOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -39994,7 +39818,7 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLink) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableVpcClassicLink(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableFastLaunch(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40027,14 +39851,14 @@ func awsEc2query_deserializeOpErrorDisableVpcClassicLink(response *smithyhttp.Re } } -type awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport struct { +type awsEc2query_deserializeOpDisableFastSnapshotRestores struct { } -func (*awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_deserializeOpDisableFastSnapshotRestores) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40052,9 +39876,9 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisableVpcClassicLinkDnsSupport(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableFastSnapshotRestores(response, &metadata) } - output := &DisableVpcClassicLinkDnsSupportOutput{} + output := &DisableFastSnapshotRestoresOutput{} out.Result = output var buff [1024]byte @@ -40075,7 +39899,7 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisableVpcClassicLinkDnsSupportOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableFastSnapshotRestoresOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40089,7 +39913,7 @@ func (m *awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorDisableVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40122,14 +39946,14 @@ func awsEc2query_deserializeOpErrorDisableVpcClassicLinkDnsSupport(response *smi } } -type awsEc2query_deserializeOpDisassociateAddress struct { +type awsEc2query_deserializeOpDisableImage struct { } -func (*awsEc2query_deserializeOpDisassociateAddress) ID() string { +func (*awsEc2query_deserializeOpDisableImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40147,21 +39971,44 @@ func (m *awsEc2query_deserializeOpDisassociateAddress) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateAddress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableImage(response, &metadata) } - output := &DisassociateAddressOutput{} + output := &DisableImageOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDisableImageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40194,14 +40041,14 @@ func awsEc2query_deserializeOpErrorDisassociateAddress(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner struct { +type awsEc2query_deserializeOpDisableImageBlockPublicAccess struct { } -func (*awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) ID() string { +func (*awsEc2query_deserializeOpDisableImageBlockPublicAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableImageBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40219,9 +40066,9 @@ func (m *awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateCapacityReservationBillingOwner(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableImageBlockPublicAccess(response, &metadata) } - output := &DisassociateCapacityReservationBillingOwnerOutput{} + output := &DisableImageBlockPublicAccessOutput{} out.Result = output var buff 
[1024]byte @@ -40242,7 +40089,7 @@ func (m *awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateCapacityReservationBillingOwnerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableImageBlockPublicAccessOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40256,7 +40103,7 @@ func (m *awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) H return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateCapacityReservationBillingOwner(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableImageBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40289,14 +40136,14 @@ func awsEc2query_deserializeOpErrorDisassociateCapacityReservationBillingOwner(r } } -type awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork struct { +type awsEc2query_deserializeOpDisableImageDeprecation struct { } -func (*awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) ID() string { +func (*awsEc2query_deserializeOpDisableImageDeprecation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableImageDeprecation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40314,9 +40161,9 @@ func (m *awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateClientVpnTargetNetwork(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableImageDeprecation(response, &metadata) } - output := &DisassociateClientVpnTargetNetworkOutput{} + output := &DisableImageDeprecationOutput{} out.Result = output var buff [1024]byte @@ -40337,7 +40184,7 @@ func (m *awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateClientVpnTargetNetworkOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableImageDeprecationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40351,7 +40198,7 @@ func (m *awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateClientVpnTargetNetwork(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableImageDeprecation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40384,14 +40231,14 @@ func 
awsEc2query_deserializeOpErrorDisassociateClientVpnTargetNetwork(response * } } -type awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole struct { +type awsEc2query_deserializeOpDisableImageDeregistrationProtection struct { } -func (*awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) ID() string { +func (*awsEc2query_deserializeOpDisableImageDeregistrationProtection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableImageDeregistrationProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40409,9 +40256,9 @@ func (m *awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateEnclaveCertificateIamRole(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableImageDeregistrationProtection(response, &metadata) } - output := &DisassociateEnclaveCertificateIamRoleOutput{} + output := &DisableImageDeregistrationProtectionOutput{} out.Result = output var buff [1024]byte @@ -40432,7 +40279,7 @@ func (m *awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateEnclaveCertificateIamRoleOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableImageDeregistrationProtectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40446,7 +40293,7 @@ func (m *awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateEnclaveCertificateIamRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableImageDeregistrationProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40479,14 +40326,14 @@ func awsEc2query_deserializeOpErrorDisassociateEnclaveCertificateIamRole(respons } } -type awsEc2query_deserializeOpDisassociateIamInstanceProfile struct { +type awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount struct { } -func (*awsEc2query_deserializeOpDisassociateIamInstanceProfile) ID() string { +func (*awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateIamInstanceProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableIpamOrganizationAdminAccount) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40504,9 +40351,9 @@ func (m 
*awsEc2query_deserializeOpDisassociateIamInstanceProfile) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateIamInstanceProfile(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableIpamOrganizationAdminAccount(response, &metadata) } - output := &DisassociateIamInstanceProfileOutput{} + output := &DisableIpamOrganizationAdminAccountOutput{} out.Result = output var buff [1024]byte @@ -40527,7 +40374,7 @@ func (m *awsEc2query_deserializeOpDisassociateIamInstanceProfile) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateIamInstanceProfileOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableIpamOrganizationAdminAccountOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40541,7 +40388,7 @@ func (m *awsEc2query_deserializeOpDisassociateIamInstanceProfile) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateIamInstanceProfile(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableIpamOrganizationAdminAccount(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40574,14 +40421,14 @@ func awsEc2query_deserializeOpErrorDisassociateIamInstanceProfile(response *smit } } -type awsEc2query_deserializeOpDisassociateInstanceEventWindow struct { +type awsEc2query_deserializeOpDisableRouteServerPropagation struct { } -func (*awsEc2query_deserializeOpDisassociateInstanceEventWindow) ID() string { +func (*awsEc2query_deserializeOpDisableRouteServerPropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableRouteServerPropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40599,9 +40446,9 @@ func (m *awsEc2query_deserializeOpDisassociateInstanceEventWindow) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateInstanceEventWindow(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableRouteServerPropagation(response, &metadata) } - output := &DisassociateInstanceEventWindowOutput{} + output := &DisableRouteServerPropagationOutput{} out.Result = output var buff [1024]byte @@ -40622,7 +40469,7 @@ func (m *awsEc2query_deserializeOpDisassociateInstanceEventWindow) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateInstanceEventWindowOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableRouteServerPropagationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40636,7 +40483,7 @@ func (m *awsEc2query_deserializeOpDisassociateInstanceEventWindow) HandleDeseria return out, metadata, err } 
-func awsEc2query_deserializeOpErrorDisassociateInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableRouteServerPropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40669,14 +40516,14 @@ func awsEc2query_deserializeOpErrorDisassociateInstanceEventWindow(response *smi } } -type awsEc2query_deserializeOpDisassociateIpamByoasn struct { +type awsEc2query_deserializeOpDisableSerialConsoleAccess struct { } -func (*awsEc2query_deserializeOpDisassociateIpamByoasn) ID() string { +func (*awsEc2query_deserializeOpDisableSerialConsoleAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableSerialConsoleAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40694,9 +40541,9 @@ func (m *awsEc2query_deserializeOpDisassociateIpamByoasn) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateIpamByoasn(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableSerialConsoleAccess(response, &metadata) } - output := &DisassociateIpamByoasnOutput{} + output := &DisableSerialConsoleAccessOutput{} out.Result = output var buff [1024]byte @@ -40717,7 +40564,7 @@ func (m *awsEc2query_deserializeOpDisassociateIpamByoasn) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateIpamByoasnOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableSerialConsoleAccessOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40731,7 +40578,7 @@ func (m *awsEc2query_deserializeOpDisassociateIpamByoasn) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableSerialConsoleAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40764,14 +40611,14 @@ func awsEc2query_deserializeOpErrorDisassociateIpamByoasn(response *smithyhttp.R } } -type awsEc2query_deserializeOpDisassociateIpamResourceDiscovery struct { +type awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess struct { } -func (*awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) ID() string { +func (*awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpDisableSnapshotBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40789,9 +40636,9 @@ func (m *awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateIpamResourceDiscovery(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableSnapshotBlockPublicAccess(response, &metadata) } - output := &DisassociateIpamResourceDiscoveryOutput{} + output := &DisableSnapshotBlockPublicAccessOutput{} out.Result = output var buff [1024]byte @@ -40812,7 +40659,7 @@ func (m *awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateIpamResourceDiscoveryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableSnapshotBlockPublicAccessOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40826,7 +40673,7 @@ func (m *awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableSnapshotBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40859,14 +40706,14 @@ func awsEc2query_deserializeOpErrorDisassociateIpamResourceDiscovery(response *s } } -type awsEc2query_deserializeOpDisassociateNatGatewayAddress struct { +type awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation struct { } -func (*awsEc2query_deserializeOpDisassociateNatGatewayAddress) ID() string { +func (*awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateNatGatewayAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableTransitGatewayRouteTablePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40884,9 +40731,9 @@ func (m *awsEc2query_deserializeOpDisassociateNatGatewayAddress) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateNatGatewayAddress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableTransitGatewayRouteTablePropagation(response, &metadata) } - output := &DisassociateNatGatewayAddressOutput{} + output := &DisableTransitGatewayRouteTablePropagationOutput{} out.Result = output var buff [1024]byte @@ -40907,7 +40754,7 @@ func (m *awsEc2query_deserializeOpDisassociateNatGatewayAddress) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentDisassociateNatGatewayAddressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableTransitGatewayRouteTablePropagationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -40921,7 +40768,7 @@ func (m *awsEc2query_deserializeOpDisassociateNatGatewayAddress) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateNatGatewayAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableTransitGatewayRouteTablePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -40954,14 +40801,14 @@ func awsEc2query_deserializeOpErrorDisassociateNatGatewayAddress(response *smith } } -type awsEc2query_deserializeOpDisassociateRouteServer struct { +type awsEc2query_deserializeOpDisableVgwRoutePropagation struct { } -func (*awsEc2query_deserializeOpDisassociateRouteServer) ID() string { +func (*awsEc2query_deserializeOpDisableVgwRoutePropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableVgwRoutePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -40979,44 +40826,21 @@ func (m *awsEc2query_deserializeOpDisassociateRouteServer) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateRouteServer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableVgwRoutePropagation(response, &metadata) } - output := &DisassociateRouteServerOutput{} + output := &DisableVgwRoutePropagationOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateRouteServerOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableVgwRoutePropagation(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41049,14 +40873,14 @@ func awsEc2query_deserializeOpErrorDisassociateRouteServer(response *smithyhttp. } } -type awsEc2query_deserializeOpDisassociateRouteTable struct { +type awsEc2query_deserializeOpDisableVpcClassicLink struct { } -func (*awsEc2query_deserializeOpDisassociateRouteTable) ID() string { +func (*awsEc2query_deserializeOpDisableVpcClassicLink) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41074,21 +40898,44 @@ func (m *awsEc2query_deserializeOpDisassociateRouteTable) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableVpcClassicLink(response, &metadata) } - output := &DisassociateRouteTableOutput{} + output := &DisableVpcClassicLinkOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDisableVpcClassicLinkOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableVpcClassicLink(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41121,14 +40968,14 @@ func awsEc2query_deserializeOpErrorDisassociateRouteTable(response *smithyhttp.R } } -type awsEc2query_deserializeOpDisassociateSecurityGroupVpc struct { +type awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_deserializeOpDisassociateSecurityGroupVpc) ID() string { +func (*awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDisassociateSecurityGroupVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisableVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41146,9 +40993,9 @@ func (m *awsEc2query_deserializeOpDisassociateSecurityGroupVpc) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateSecurityGroupVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisableVpcClassicLinkDnsSupport(response, &metadata) } - output := &DisassociateSecurityGroupVpcOutput{} + output := &DisableVpcClassicLinkDnsSupportOutput{} out.Result = output var buff [1024]byte @@ -41169,7 +41016,7 @@ func (m *awsEc2query_deserializeOpDisassociateSecurityGroupVpc) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateSecurityGroupVpcOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisableVpcClassicLinkDnsSupportOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41183,7 +41030,7 @@ func (m *awsEc2query_deserializeOpDisassociateSecurityGroupVpc) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateSecurityGroupVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisableVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41216,14 +41063,14 @@ func awsEc2query_deserializeOpErrorDisassociateSecurityGroupVpc(response *smithy } } -type awsEc2query_deserializeOpDisassociateSubnetCidrBlock struct { +type awsEc2query_deserializeOpDisassociateAddress struct { } -func (*awsEc2query_deserializeOpDisassociateSubnetCidrBlock) ID() string { +func (*awsEc2query_deserializeOpDisassociateAddress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateSubnetCidrBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41241,44 +41088,21 @@ func (m *awsEc2query_deserializeOpDisassociateSubnetCidrBlock) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateSubnetCidrBlock(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateAddress(response, &metadata) } - output := &DisassociateSubnetCidrBlockOutput{} + output := &DisassociateAddressOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, 
err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateSubnetCidrBlockOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateSubnetCidrBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41311,14 +41135,14 @@ func awsEc2query_deserializeOpErrorDisassociateSubnetCidrBlock(response *smithyh } } -type awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain struct { +type awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner struct { } -func (*awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) ID() string { +func (*awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateCapacityReservationBillingOwner) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41336,9 +41160,9 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayMulticastDomain(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateCapacityReservationBillingOwner(response, &metadata) } - output := &DisassociateTransitGatewayMulticastDomainOutput{} + output := &DisassociateCapacityReservationBillingOwnerOutput{} out.Result = output var buff [1024]byte @@ -41359,7 +41183,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) Han } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayMulticastDomainOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateCapacityReservationBillingOwnerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41373,7 +41197,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) Han return out, metadata, err } -func 
awsEc2query_deserializeOpErrorDisassociateTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateCapacityReservationBillingOwner(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41406,14 +41230,14 @@ func awsEc2query_deserializeOpErrorDisassociateTransitGatewayMulticastDomain(res } } -type awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable struct { +type awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork struct { } -func (*awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) ID() string { +func (*awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateClientVpnTargetNetwork) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41431,9 +41255,9 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayPolicyTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateClientVpnTargetNetwork(response, &metadata) } - output := &DisassociateTransitGatewayPolicyTableOutput{} + output := &DisassociateClientVpnTargetNetworkOutput{} out.Result = output var buff [1024]byte @@ -41454,7 +41278,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayPolicyTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateClientVpnTargetNetworkOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41468,7 +41292,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateClientVpnTargetNetwork(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41501,14 +41325,14 @@ func awsEc2query_deserializeOpErrorDisassociateTransitGatewayPolicyTable(respons } } -type awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable struct { +type awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole struct { } -func (*awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) ID() string { +func (*awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateEnclaveCertificateIamRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41526,9 +41350,9 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayRouteTable(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateEnclaveCertificateIamRole(response, &metadata) } - output := &DisassociateTransitGatewayRouteTableOutput{} + output := &DisassociateEnclaveCertificateIamRoleOutput{} out.Result = output var buff [1024]byte @@ -41549,7 +41373,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayRouteTableOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateEnclaveCertificateIamRoleOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41563,7 +41387,7 @@ func (m *awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateEnclaveCertificateIamRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41596,14 +41420,14 @@ func awsEc2query_deserializeOpErrorDisassociateTransitGatewayRouteTable(response } } -type awsEc2query_deserializeOpDisassociateTrunkInterface struct { +type awsEc2query_deserializeOpDisassociateIamInstanceProfile struct { } -func (*awsEc2query_deserializeOpDisassociateTrunkInterface) ID() string { +func (*awsEc2query_deserializeOpDisassociateIamInstanceProfile) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateTrunkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateIamInstanceProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41621,9 +41445,9 @@ func (m *awsEc2query_deserializeOpDisassociateTrunkInterface) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateTrunkInterface(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateIamInstanceProfile(response, &metadata) } - output := &DisassociateTrunkInterfaceOutput{} + output := &DisassociateIamInstanceProfileOutput{} out.Result = output var buff [1024]byte @@ -41644,7 +41468,7 @@ 
func (m *awsEc2query_deserializeOpDisassociateTrunkInterface) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateTrunkInterfaceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateIamInstanceProfileOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41658,7 +41482,7 @@ func (m *awsEc2query_deserializeOpDisassociateTrunkInterface) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateTrunkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateIamInstanceProfile(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41691,14 +41515,14 @@ func awsEc2query_deserializeOpErrorDisassociateTrunkInterface(response *smithyht } } -type awsEc2query_deserializeOpDisassociateVpcCidrBlock struct { +type awsEc2query_deserializeOpDisassociateInstanceEventWindow struct { } -func (*awsEc2query_deserializeOpDisassociateVpcCidrBlock) ID() string { +func (*awsEc2query_deserializeOpDisassociateInstanceEventWindow) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpDisassociateVpcCidrBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41716,9 +41540,9 @@ func (m *awsEc2query_deserializeOpDisassociateVpcCidrBlock) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorDisassociateVpcCidrBlock(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateInstanceEventWindow(response, &metadata) } - output := &DisassociateVpcCidrBlockOutput{} + output := &DisassociateInstanceEventWindowOutput{} out.Result = output var buff [1024]byte @@ -41739,7 +41563,7 @@ func (m *awsEc2query_deserializeOpDisassociateVpcCidrBlock) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentDisassociateVpcCidrBlockOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateInstanceEventWindowOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41753,7 +41577,7 @@ func (m *awsEc2query_deserializeOpDisassociateVpcCidrBlock) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorDisassociateVpcCidrBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41786,14 +41610,14 @@ func awsEc2query_deserializeOpErrorDisassociateVpcCidrBlock(response *smithyhttp } } 
-type awsEc2query_deserializeOpEnableAddressTransfer struct { +type awsEc2query_deserializeOpDisassociateIpamByoasn struct { } -func (*awsEc2query_deserializeOpEnableAddressTransfer) ID() string { +func (*awsEc2query_deserializeOpDisassociateIpamByoasn) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableAddressTransfer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41811,9 +41635,9 @@ func (m *awsEc2query_deserializeOpEnableAddressTransfer) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableAddressTransfer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateIpamByoasn(response, &metadata) } - output := &EnableAddressTransferOutput{} + output := &DisassociateIpamByoasnOutput{} out.Result = output var buff [1024]byte @@ -41834,7 +41658,7 @@ func (m *awsEc2query_deserializeOpEnableAddressTransfer) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableAddressTransferOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateIpamByoasnOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41848,7 +41672,7 @@ func (m *awsEc2query_deserializeOpEnableAddressTransfer) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableAddressTransfer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41881,14 +41705,14 @@ func awsEc2query_deserializeOpErrorEnableAddressTransfer(response *smithyhttp.Re } } -type awsEc2query_deserializeOpEnableAllowedImagesSettings struct { +type awsEc2query_deserializeOpDisassociateIpamResourceDiscovery struct { } -func (*awsEc2query_deserializeOpEnableAllowedImagesSettings) ID() string { +func (*awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41906,9 +41730,9 @@ func (m *awsEc2query_deserializeOpEnableAllowedImagesSettings) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableAllowedImagesSettings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateIpamResourceDiscovery(response, 
&metadata) } - output := &EnableAllowedImagesSettingsOutput{} + output := &DisassociateIpamResourceDiscoveryOutput{} out.Result = output var buff [1024]byte @@ -41929,7 +41753,7 @@ func (m *awsEc2query_deserializeOpEnableAllowedImagesSettings) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableAllowedImagesSettingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateIpamResourceDiscoveryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -41943,7 +41767,7 @@ func (m *awsEc2query_deserializeOpEnableAllowedImagesSettings) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -41976,14 +41800,14 @@ func awsEc2query_deserializeOpErrorEnableAllowedImagesSettings(response *smithyh } } -type awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription struct { +type awsEc2query_deserializeOpDisassociateNatGatewayAddress struct { } -func (*awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) ID() string { +func (*awsEc2query_deserializeOpDisassociateNatGatewayAddress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateNatGatewayAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42001,9 +41825,9 @@ func (m *awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableAwsNetworkPerformanceMetricSubscription(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateNatGatewayAddress(response, &metadata) } - output := &EnableAwsNetworkPerformanceMetricSubscriptionOutput{} + output := &DisassociateNatGatewayAddressOutput{} out.Result = output var buff [1024]byte @@ -42024,7 +41848,7 @@ func (m *awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableAwsNetworkPerformanceMetricSubscriptionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateNatGatewayAddressOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42038,7 +41862,7 @@ func (m *awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableAwsNetworkPerformanceMetricSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateNatGatewayAddress(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42071,14 +41895,14 @@ func awsEc2query_deserializeOpErrorEnableAwsNetworkPerformanceMetricSubscription } } -type awsEc2query_deserializeOpEnableEbsEncryptionByDefault struct { +type awsEc2query_deserializeOpDisassociateRouteServer struct { } -func (*awsEc2query_deserializeOpEnableEbsEncryptionByDefault) ID() string { +func (*awsEc2query_deserializeOpDisassociateRouteServer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42096,9 +41920,9 @@ func (m *awsEc2query_deserializeOpEnableEbsEncryptionByDefault) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableEbsEncryptionByDefault(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateRouteServer(response, &metadata) } - output := &EnableEbsEncryptionByDefaultOutput{} + output := &DisassociateRouteServerOutput{} out.Result = output var buff [1024]byte @@ -42119,7 +41943,7 @@ func (m *awsEc2query_deserializeOpEnableEbsEncryptionByDefault) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableEbsEncryptionByDefaultOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateRouteServerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42133,7 +41957,7 @@ func (m *awsEc2query_deserializeOpEnableEbsEncryptionByDefault) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42166,14 +41990,14 @@ func awsEc2query_deserializeOpErrorEnableEbsEncryptionByDefault(response *smithy } } -type awsEc2query_deserializeOpEnableFastLaunch struct { +type awsEc2query_deserializeOpDisassociateRouteTable struct { } -func (*awsEc2query_deserializeOpEnableFastLaunch) ID() string { +func (*awsEc2query_deserializeOpDisassociateRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableFastLaunch) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42191,44 +42015,21 @@ func (m 
*awsEc2query_deserializeOpEnableFastLaunch) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableFastLaunch(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateRouteTable(response, &metadata) } - output := &EnableFastLaunchOutput{} + output := &DisassociateRouteTableOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableFastLaunchOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableFastLaunch(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42261,14 +42062,14 @@ func awsEc2query_deserializeOpErrorEnableFastLaunch(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpEnableFastSnapshotRestores struct { +type awsEc2query_deserializeOpDisassociateSecurityGroupVpc struct { } -func (*awsEc2query_deserializeOpEnableFastSnapshotRestores) ID() string { +func (*awsEc2query_deserializeOpDisassociateSecurityGroupVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateSecurityGroupVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42286,9 +42087,9 @@ func (m *awsEc2query_deserializeOpEnableFastSnapshotRestores) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableFastSnapshotRestores(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateSecurityGroupVpc(response, &metadata) } - output := &EnableFastSnapshotRestoresOutput{} + output := &DisassociateSecurityGroupVpcOutput{} out.Result = output var buff [1024]byte @@ -42309,7 +42110,7 @@ func (m *awsEc2query_deserializeOpEnableFastSnapshotRestores) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentEnableFastSnapshotRestoresOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateSecurityGroupVpcOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42323,7 +42124,7 @@ func (m *awsEc2query_deserializeOpEnableFastSnapshotRestores) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateSecurityGroupVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42356,14 +42157,14 @@ func awsEc2query_deserializeOpErrorEnableFastSnapshotRestores(response *smithyht } } -type awsEc2query_deserializeOpEnableImage struct { +type awsEc2query_deserializeOpDisassociateSubnetCidrBlock struct { } -func (*awsEc2query_deserializeOpEnableImage) ID() string { +func (*awsEc2query_deserializeOpDisassociateSubnetCidrBlock) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateSubnetCidrBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42381,9 +42182,9 @@ func (m *awsEc2query_deserializeOpEnableImage) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateSubnetCidrBlock(response, &metadata) } - output := &EnableImageOutput{} + output := &DisassociateSubnetCidrBlockOutput{} out.Result = output var buff [1024]byte @@ -42404,7 +42205,7 @@ func (m *awsEc2query_deserializeOpEnableImage) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateSubnetCidrBlockOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42418,7 +42219,7 @@ func (m *awsEc2query_deserializeOpEnableImage) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateSubnetCidrBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42451,14 +42252,14 @@ func awsEc2query_deserializeOpErrorEnableImage(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpEnableImageBlockPublicAccess struct { +type awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain struct { } -func (*awsEc2query_deserializeOpEnableImageBlockPublicAccess) ID() string { +func 
(*awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableImageBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateTransitGatewayMulticastDomain) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42476,9 +42277,9 @@ func (m *awsEc2query_deserializeOpEnableImageBlockPublicAccess) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableImageBlockPublicAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayMulticastDomain(response, &metadata) } - output := &EnableImageBlockPublicAccessOutput{} + output := &DisassociateTransitGatewayMulticastDomainOutput{} out.Result = output var buff [1024]byte @@ -42499,7 +42300,7 @@ func (m *awsEc2query_deserializeOpEnableImageBlockPublicAccess) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableImageBlockPublicAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayMulticastDomainOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42513,7 +42314,7 @@ func (m *awsEc2query_deserializeOpEnableImageBlockPublicAccess) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableImageBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateTransitGatewayMulticastDomain(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42546,14 +42347,14 @@ func awsEc2query_deserializeOpErrorEnableImageBlockPublicAccess(response *smithy } } -type awsEc2query_deserializeOpEnableImageDeprecation struct { +type awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable struct { } -func (*awsEc2query_deserializeOpEnableImageDeprecation) ID() string { +func (*awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableImageDeprecation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateTransitGatewayPolicyTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42571,9 +42372,9 @@ func (m *awsEc2query_deserializeOpEnableImageDeprecation) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableImageDeprecation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayPolicyTable(response, &metadata) } - output := &EnableImageDeprecationOutput{} + 
output := &DisassociateTransitGatewayPolicyTableOutput{} out.Result = output var buff [1024]byte @@ -42594,7 +42395,7 @@ func (m *awsEc2query_deserializeOpEnableImageDeprecation) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableImageDeprecationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayPolicyTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42608,7 +42409,7 @@ func (m *awsEc2query_deserializeOpEnableImageDeprecation) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableImageDeprecation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateTransitGatewayPolicyTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42641,14 +42442,14 @@ func awsEc2query_deserializeOpErrorEnableImageDeprecation(response *smithyhttp.R } } -type awsEc2query_deserializeOpEnableImageDeregistrationProtection struct { +type awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable struct { } -func (*awsEc2query_deserializeOpEnableImageDeregistrationProtection) ID() string { +func (*awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableImageDeregistrationProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateTransitGatewayRouteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42666,9 +42467,9 @@ func (m *awsEc2query_deserializeOpEnableImageDeregistrationProtection) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableImageDeregistrationProtection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateTransitGatewayRouteTable(response, &metadata) } - output := &EnableImageDeregistrationProtectionOutput{} + output := &DisassociateTransitGatewayRouteTableOutput{} out.Result = output var buff [1024]byte @@ -42689,7 +42490,7 @@ func (m *awsEc2query_deserializeOpEnableImageDeregistrationProtection) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableImageDeregistrationProtectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateTransitGatewayRouteTableOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42703,7 +42504,7 @@ func (m *awsEc2query_deserializeOpEnableImageDeregistrationProtection) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableImageDeregistrationProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateTransitGatewayRouteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42736,14 +42537,14 @@ func awsEc2query_deserializeOpErrorEnableImageDeregistrationProtection(response } } -type awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount struct { +type awsEc2query_deserializeOpDisassociateTrunkInterface struct { } -func (*awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) ID() string { +func (*awsEc2query_deserializeOpDisassociateTrunkInterface) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateTrunkInterface) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42761,9 +42562,9 @@ func (m *awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableIpamOrganizationAdminAccount(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateTrunkInterface(response, &metadata) } - output := &EnableIpamOrganizationAdminAccountOutput{} + output := &DisassociateTrunkInterfaceOutput{} out.Result = output var buff [1024]byte @@ -42784,7 +42585,7 @@ func (m *awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableIpamOrganizationAdminAccountOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateTrunkInterfaceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42798,7 +42599,7 @@ func (m *awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableIpamOrganizationAdminAccount(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateTrunkInterface(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42831,14 +42632,14 @@ func awsEc2query_deserializeOpErrorEnableIpamOrganizationAdminAccount(response * } } -type awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing struct { +type awsEc2query_deserializeOpDisassociateVpcCidrBlock struct { } -func (*awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) ID() string { +func (*awsEc2query_deserializeOpDisassociateVpcCidrBlock) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpDisassociateVpcCidrBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = 
next.HandleDeserialize(ctx, in) @@ -42856,9 +42657,9 @@ func (m *awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableReachabilityAnalyzerOrganizationSharing(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorDisassociateVpcCidrBlock(response, &metadata) } - output := &EnableReachabilityAnalyzerOrganizationSharingOutput{} + output := &DisassociateVpcCidrBlockOutput{} out.Result = output var buff [1024]byte @@ -42879,7 +42680,7 @@ func (m *awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableReachabilityAnalyzerOrganizationSharingOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentDisassociateVpcCidrBlockOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42893,7 +42694,7 @@ func (m *awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableReachabilityAnalyzerOrganizationSharing(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorDisassociateVpcCidrBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -42926,14 +42727,14 @@ func awsEc2query_deserializeOpErrorEnableReachabilityAnalyzerOrganizationSharing } } -type awsEc2query_deserializeOpEnableRouteServerPropagation struct { +type awsEc2query_deserializeOpEnableAddressTransfer struct { } -func (*awsEc2query_deserializeOpEnableRouteServerPropagation) ID() string { +func (*awsEc2query_deserializeOpEnableAddressTransfer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableRouteServerPropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableAddressTransfer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -42951,9 +42752,9 @@ func (m *awsEc2query_deserializeOpEnableRouteServerPropagation) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableRouteServerPropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableAddressTransfer(response, &metadata) } - output := &EnableRouteServerPropagationOutput{} + output := &EnableAddressTransferOutput{} out.Result = output var buff [1024]byte @@ -42974,7 +42775,7 @@ func (m *awsEc2query_deserializeOpEnableRouteServerPropagation) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableRouteServerPropagationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableAddressTransferOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -42988,7 +42789,7 @@ func (m *awsEc2query_deserializeOpEnableRouteServerPropagation) HandleDeserializ return out, 
metadata, err } -func awsEc2query_deserializeOpErrorEnableRouteServerPropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableAddressTransfer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43021,14 +42822,14 @@ func awsEc2query_deserializeOpErrorEnableRouteServerPropagation(response *smithy } } -type awsEc2query_deserializeOpEnableSerialConsoleAccess struct { +type awsEc2query_deserializeOpEnableAllowedImagesSettings struct { } -func (*awsEc2query_deserializeOpEnableSerialConsoleAccess) ID() string { +func (*awsEc2query_deserializeOpEnableAllowedImagesSettings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableSerialConsoleAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43046,9 +42847,9 @@ func (m *awsEc2query_deserializeOpEnableSerialConsoleAccess) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableSerialConsoleAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableAllowedImagesSettings(response, &metadata) } - output := &EnableSerialConsoleAccessOutput{} + output := &EnableAllowedImagesSettingsOutput{} out.Result = output var buff [1024]byte @@ -43069,7 +42870,7 @@ func (m *awsEc2query_deserializeOpEnableSerialConsoleAccess) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableSerialConsoleAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableAllowedImagesSettingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43083,7 +42884,7 @@ func (m *awsEc2query_deserializeOpEnableSerialConsoleAccess) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableSerialConsoleAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43116,14 +42917,14 @@ func awsEc2query_deserializeOpErrorEnableSerialConsoleAccess(response *smithyhtt } } -type awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess struct { +type awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription struct { } -func (*awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) ID() string { +func (*awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( 
+func (m *awsEc2query_deserializeOpEnableAwsNetworkPerformanceMetricSubscription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43141,9 +42942,9 @@ func (m *awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableSnapshotBlockPublicAccess(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableAwsNetworkPerformanceMetricSubscription(response, &metadata) } - output := &EnableSnapshotBlockPublicAccessOutput{} + output := &EnableAwsNetworkPerformanceMetricSubscriptionOutput{} out.Result = output var buff [1024]byte @@ -43164,7 +42965,7 @@ func (m *awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableSnapshotBlockPublicAccessOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableAwsNetworkPerformanceMetricSubscriptionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43178,7 +42979,7 @@ func (m *awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableSnapshotBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableAwsNetworkPerformanceMetricSubscription(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43211,14 +43012,14 @@ func awsEc2query_deserializeOpErrorEnableSnapshotBlockPublicAccess(response *smi } } -type awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation struct { +type awsEc2query_deserializeOpEnableEbsEncryptionByDefault struct { } -func (*awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) ID() string { +func (*awsEc2query_deserializeOpEnableEbsEncryptionByDefault) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43236,9 +43037,9 @@ func (m *awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableTransitGatewayRouteTablePropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableEbsEncryptionByDefault(response, &metadata) } - output := &EnableTransitGatewayRouteTablePropagationOutput{} + output := &EnableEbsEncryptionByDefaultOutput{} out.Result = output var buff [1024]byte @@ -43259,7 +43060,7 @@ func (m *awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) Han } decoder 
:= smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableTransitGatewayRouteTablePropagationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableEbsEncryptionByDefaultOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43273,7 +43074,7 @@ func (m *awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) Han return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableTransitGatewayRouteTablePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43306,14 +43107,14 @@ func awsEc2query_deserializeOpErrorEnableTransitGatewayRouteTablePropagation(res } } -type awsEc2query_deserializeOpEnableVgwRoutePropagation struct { +type awsEc2query_deserializeOpEnableFastLaunch struct { } -func (*awsEc2query_deserializeOpEnableVgwRoutePropagation) ID() string { +func (*awsEc2query_deserializeOpEnableFastLaunch) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableVgwRoutePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableFastLaunch) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43331,21 +43132,44 @@ func (m *awsEc2query_deserializeOpEnableVgwRoutePropagation) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableVgwRoutePropagation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableFastLaunch(response, &metadata) } - output := &EnableVgwRoutePropagationOutput{} + output := &EnableFastLaunchOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentEnableFastLaunchOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableVgwRoutePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableFastLaunch(response *smithyhttp.Response, metadata *middleware.Metadata) 
error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43378,14 +43202,14 @@ func awsEc2query_deserializeOpErrorEnableVgwRoutePropagation(response *smithyhtt } } -type awsEc2query_deserializeOpEnableVolumeIO struct { +type awsEc2query_deserializeOpEnableFastSnapshotRestores struct { } -func (*awsEc2query_deserializeOpEnableVolumeIO) ID() string { +func (*awsEc2query_deserializeOpEnableFastSnapshotRestores) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableVolumeIO) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableFastSnapshotRestores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43403,21 +43227,44 @@ func (m *awsEc2query_deserializeOpEnableVolumeIO) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableVolumeIO(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableFastSnapshotRestores(response, &metadata) } - output := &EnableVolumeIOOutput{} + output := &EnableFastSnapshotRestoresOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentEnableFastSnapshotRestoresOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableVolumeIO(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableFastSnapshotRestores(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43450,14 +43297,14 @@ func awsEc2query_deserializeOpErrorEnableVolumeIO(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpEnableVpcClassicLink struct { +type awsEc2query_deserializeOpEnableImage struct { } -func (*awsEc2query_deserializeOpEnableVpcClassicLink) ID() string { +func (*awsEc2query_deserializeOpEnableImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43475,9 +43322,9 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLink) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableVpcClassicLink(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableImage(response, &metadata) } - output := &EnableVpcClassicLinkOutput{} + output := &EnableImageOutput{} out.Result = output var buff [1024]byte @@ -43498,7 +43345,7 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLink) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableVpcClassicLinkOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43512,7 +43359,7 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLink) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableVpcClassicLink(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43545,14 +43392,14 @@ func awsEc2query_deserializeOpErrorEnableVpcClassicLink(response *smithyhttp.Res } } -type awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport struct { +type awsEc2query_deserializeOpEnableImageBlockPublicAccess struct { } -func (*awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_deserializeOpEnableImageBlockPublicAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableImageBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43570,9 +43417,9 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorEnableVpcClassicLinkDnsSupport(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableImageBlockPublicAccess(response, &metadata) } - output := &EnableVpcClassicLinkDnsSupportOutput{} + output := &EnableImageBlockPublicAccessOutput{} out.Result = output var buff [1024]byte @@ -43593,7 +43440,7 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentEnableVpcClassicLinkDnsSupportOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableImageBlockPublicAccessOutput(&output, decoder) 
if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43607,7 +43454,7 @@ func (m *awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorEnableVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableImageBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43640,14 +43487,14 @@ func awsEc2query_deserializeOpErrorEnableVpcClassicLinkDnsSupport(response *smit } } -type awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList struct { +type awsEc2query_deserializeOpEnableImageDeprecation struct { } -func (*awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList) ID() string { +func (*awsEc2query_deserializeOpEnableImageDeprecation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableImageDeprecation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43665,9 +43512,9 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorExportClientVpnClientCertificateRevocationList(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableImageDeprecation(response, &metadata) } - output := &ExportClientVpnClientCertificateRevocationListOutput{} + output := &EnableImageDeprecationOutput{} out.Result = output var buff [1024]byte @@ -43688,7 +43535,7 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentExportClientVpnClientCertificateRevocationListOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableImageDeprecationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43702,7 +43549,7 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList return out, metadata, err } -func awsEc2query_deserializeOpErrorExportClientVpnClientCertificateRevocationList(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableImageDeprecation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43735,14 +43582,14 @@ func awsEc2query_deserializeOpErrorExportClientVpnClientCertificateRevocationLis } } -type awsEc2query_deserializeOpExportClientVpnClientConfiguration struct { +type awsEc2query_deserializeOpEnableImageDeregistrationProtection struct { } -func (*awsEc2query_deserializeOpExportClientVpnClientConfiguration) ID() string { 
+func (*awsEc2query_deserializeOpEnableImageDeregistrationProtection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpExportClientVpnClientConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableImageDeregistrationProtection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43760,9 +43607,9 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientConfiguration) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorExportClientVpnClientConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableImageDeregistrationProtection(response, &metadata) } - output := &ExportClientVpnClientConfigurationOutput{} + output := &EnableImageDeregistrationProtectionOutput{} out.Result = output var buff [1024]byte @@ -43783,7 +43630,7 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientConfiguration) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentExportClientVpnClientConfigurationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableImageDeregistrationProtectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43797,7 +43644,7 @@ func (m *awsEc2query_deserializeOpExportClientVpnClientConfiguration) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorExportClientVpnClientConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableImageDeregistrationProtection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43830,14 +43677,14 @@ func awsEc2query_deserializeOpErrorExportClientVpnClientConfiguration(response * } } -type awsEc2query_deserializeOpExportImage struct { +type awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount struct { } -func (*awsEc2query_deserializeOpExportImage) ID() string { +func (*awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpExportImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableIpamOrganizationAdminAccount) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43855,9 +43702,9 @@ func (m *awsEc2query_deserializeOpExportImage) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorExportImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableIpamOrganizationAdminAccount(response, &metadata) } - output := &ExportImageOutput{} + output := &EnableIpamOrganizationAdminAccountOutput{} out.Result = 
output var buff [1024]byte @@ -43878,7 +43725,7 @@ func (m *awsEc2query_deserializeOpExportImage) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentExportImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableIpamOrganizationAdminAccountOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43892,7 +43739,7 @@ func (m *awsEc2query_deserializeOpExportImage) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorExportImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableIpamOrganizationAdminAccount(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -43925,14 +43772,14 @@ func awsEc2query_deserializeOpErrorExportImage(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpExportTransitGatewayRoutes struct { +type awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing struct { } -func (*awsEc2query_deserializeOpExportTransitGatewayRoutes) ID() string { +func (*awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpExportTransitGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableReachabilityAnalyzerOrganizationSharing) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -43950,9 +43797,9 @@ func (m *awsEc2query_deserializeOpExportTransitGatewayRoutes) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorExportTransitGatewayRoutes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableReachabilityAnalyzerOrganizationSharing(response, &metadata) } - output := &ExportTransitGatewayRoutesOutput{} + output := &EnableReachabilityAnalyzerOrganizationSharingOutput{} out.Result = output var buff [1024]byte @@ -43973,7 +43820,7 @@ func (m *awsEc2query_deserializeOpExportTransitGatewayRoutes) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentExportTransitGatewayRoutesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableReachabilityAnalyzerOrganizationSharingOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -43987,7 +43834,7 @@ func (m *awsEc2query_deserializeOpExportTransitGatewayRoutes) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorExportTransitGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableReachabilityAnalyzerOrganizationSharing(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error 
response body, %w", err)} @@ -44020,14 +43867,14 @@ func awsEc2query_deserializeOpErrorExportTransitGatewayRoutes(response *smithyht } } -type awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration struct { +type awsEc2query_deserializeOpEnableRouteServerPropagation struct { } -func (*awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration) ID() string { +func (*awsEc2query_deserializeOpEnableRouteServerPropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableRouteServerPropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44045,9 +43892,9 @@ func (m *awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguratio } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorExportVerifiedAccessInstanceClientConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableRouteServerPropagation(response, &metadata) } - output := &ExportVerifiedAccessInstanceClientConfigurationOutput{} + output := &EnableRouteServerPropagationOutput{} out.Result = output var buff [1024]byte @@ -44068,7 +43915,7 @@ func (m *awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguratio } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentExportVerifiedAccessInstanceClientConfigurationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableRouteServerPropagationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44082,7 +43929,7 @@ func (m *awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguratio return out, metadata, err } -func awsEc2query_deserializeOpErrorExportVerifiedAccessInstanceClientConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableRouteServerPropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44115,14 +43962,14 @@ func awsEc2query_deserializeOpErrorExportVerifiedAccessInstanceClientConfigurati } } -type awsEc2query_deserializeOpGetAllowedImagesSettings struct { +type awsEc2query_deserializeOpEnableSerialConsoleAccess struct { } -func (*awsEc2query_deserializeOpGetAllowedImagesSettings) ID() string { +func (*awsEc2query_deserializeOpEnableSerialConsoleAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableSerialConsoleAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44140,9 +43987,9 @@ func (m 
*awsEc2query_deserializeOpGetAllowedImagesSettings) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetAllowedImagesSettings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableSerialConsoleAccess(response, &metadata) } - output := &GetAllowedImagesSettingsOutput{} + output := &EnableSerialConsoleAccessOutput{} out.Result = output var buff [1024]byte @@ -44163,7 +44010,7 @@ func (m *awsEc2query_deserializeOpGetAllowedImagesSettings) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetAllowedImagesSettingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableSerialConsoleAccessOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44177,7 +44024,7 @@ func (m *awsEc2query_deserializeOpGetAllowedImagesSettings) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorGetAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableSerialConsoleAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44210,14 +44057,14 @@ func awsEc2query_deserializeOpErrorGetAllowedImagesSettings(response *smithyhttp } } -type awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles struct { +type awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess struct { } -func (*awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) ID() string { +func (*awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableSnapshotBlockPublicAccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44235,9 +44082,9 @@ func (m *awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetAssociatedEnclaveCertificateIamRoles(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableSnapshotBlockPublicAccess(response, &metadata) } - output := &GetAssociatedEnclaveCertificateIamRolesOutput{} + output := &EnableSnapshotBlockPublicAccessOutput{} out.Result = output var buff [1024]byte @@ -44258,7 +44105,7 @@ func (m *awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetAssociatedEnclaveCertificateIamRolesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableSnapshotBlockPublicAccessOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44272,7 +44119,7 @@ func (m *awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) Handl return out, metadata, err } -func 
awsEc2query_deserializeOpErrorGetAssociatedEnclaveCertificateIamRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableSnapshotBlockPublicAccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44305,14 +44152,14 @@ func awsEc2query_deserializeOpErrorGetAssociatedEnclaveCertificateIamRoles(respo } } -type awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs struct { +type awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation struct { } -func (*awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) ID() string { +func (*awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableTransitGatewayRouteTablePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44330,9 +44177,9 @@ func (m *awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetAssociatedIpv6PoolCidrs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableTransitGatewayRouteTablePropagation(response, &metadata) } - output := &GetAssociatedIpv6PoolCidrsOutput{} + output := &EnableTransitGatewayRouteTablePropagationOutput{} out.Result = output var buff [1024]byte @@ -44353,7 +44200,7 @@ func (m *awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetAssociatedIpv6PoolCidrsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableTransitGatewayRouteTablePropagationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44367,7 +44214,7 @@ func (m *awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorGetAssociatedIpv6PoolCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableTransitGatewayRouteTablePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44400,14 +44247,14 @@ func awsEc2query_deserializeOpErrorGetAssociatedIpv6PoolCidrs(response *smithyht } } -type awsEc2query_deserializeOpGetAwsNetworkPerformanceData struct { +type awsEc2query_deserializeOpEnableVgwRoutePropagation struct { } -func (*awsEc2query_deserializeOpGetAwsNetworkPerformanceData) ID() string { +func (*awsEc2query_deserializeOpEnableVgwRoutePropagation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetAwsNetworkPerformanceData) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableVgwRoutePropagation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44425,44 +44272,21 @@ func (m *awsEc2query_deserializeOpGetAwsNetworkPerformanceData) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetAwsNetworkPerformanceData(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableVgwRoutePropagation(response, &metadata) } - output := &GetAwsNetworkPerformanceDataOutput{} + output := &EnableVgwRoutePropagationOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetAwsNetworkPerformanceDataOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorGetAwsNetworkPerformanceData(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableVgwRoutePropagation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44495,14 +44319,14 @@ func awsEc2query_deserializeOpErrorGetAwsNetworkPerformanceData(response *smithy } } -type awsEc2query_deserializeOpGetCapacityReservationUsage struct { +type awsEc2query_deserializeOpEnableVolumeIO struct { } -func (*awsEc2query_deserializeOpGetCapacityReservationUsage) ID() string { +func (*awsEc2query_deserializeOpEnableVolumeIO) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetCapacityReservationUsage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableVolumeIO) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44520,44 +44344,21 @@ func (m *awsEc2query_deserializeOpGetCapacityReservationUsage) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetCapacityReservationUsage(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorEnableVolumeIO(response, &metadata) } - output := &GetCapacityReservationUsageOutput{} + output := &EnableVolumeIOOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetCapacityReservationUsageOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorGetCapacityReservationUsage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableVolumeIO(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44590,14 +44391,14 @@ func awsEc2query_deserializeOpErrorGetCapacityReservationUsage(response *smithyh } } -type awsEc2query_deserializeOpGetCoipPoolUsage struct { +type awsEc2query_deserializeOpEnableVpcClassicLink struct { } -func (*awsEc2query_deserializeOpGetCoipPoolUsage) ID() string { +func (*awsEc2query_deserializeOpEnableVpcClassicLink) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetCoipPoolUsage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableVpcClassicLink) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44615,9 +44416,9 @@ func (m *awsEc2query_deserializeOpGetCoipPoolUsage) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetCoipPoolUsage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableVpcClassicLink(response, &metadata) } - output := &GetCoipPoolUsageOutput{} + output := &EnableVpcClassicLinkOutput{} out.Result = output var buff [1024]byte @@ -44638,7 +44439,7 @@ func (m *awsEc2query_deserializeOpGetCoipPoolUsage) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetCoipPoolUsageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableVpcClassicLinkOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44652,7 +44453,7 @@ func (m *awsEc2query_deserializeOpGetCoipPoolUsage) HandleDeserialize(ctx contex return out, metadata, err } -func 
awsEc2query_deserializeOpErrorGetCoipPoolUsage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableVpcClassicLink(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44685,14 +44486,14 @@ func awsEc2query_deserializeOpErrorGetCoipPoolUsage(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpGetConsoleOutput struct { +type awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_deserializeOpGetConsoleOutput) ID() string { +func (*awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetConsoleOutput) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpEnableVpcClassicLinkDnsSupport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44710,9 +44511,9 @@ func (m *awsEc2query_deserializeOpGetConsoleOutput) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetConsoleOutput(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorEnableVpcClassicLinkDnsSupport(response, &metadata) } - output := &GetConsoleOutputOutput{} + output := &EnableVpcClassicLinkDnsSupportOutput{} out.Result = output var buff [1024]byte @@ -44733,7 +44534,7 @@ func (m *awsEc2query_deserializeOpGetConsoleOutput) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetConsoleOutputOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentEnableVpcClassicLinkDnsSupportOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44747,7 +44548,7 @@ func (m *awsEc2query_deserializeOpGetConsoleOutput) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorGetConsoleOutput(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorEnableVpcClassicLinkDnsSupport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44780,14 +44581,14 @@ func awsEc2query_deserializeOpErrorGetConsoleOutput(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpGetConsoleScreenshot struct { +type awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList struct { } -func (*awsEc2query_deserializeOpGetConsoleScreenshot) ID() string { +func (*awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetConsoleScreenshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpExportClientVpnClientCertificateRevocationList) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44805,9 +44606,9 @@ func (m *awsEc2query_deserializeOpGetConsoleScreenshot) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetConsoleScreenshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorExportClientVpnClientCertificateRevocationList(response, &metadata) } - output := &GetConsoleScreenshotOutput{} + output := &ExportClientVpnClientCertificateRevocationListOutput{} out.Result = output var buff [1024]byte @@ -44828,7 +44629,7 @@ func (m *awsEc2query_deserializeOpGetConsoleScreenshot) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetConsoleScreenshotOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentExportClientVpnClientCertificateRevocationListOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44842,7 +44643,7 @@ func (m *awsEc2query_deserializeOpGetConsoleScreenshot) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorGetConsoleScreenshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorExportClientVpnClientCertificateRevocationList(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44875,14 +44676,14 @@ func awsEc2query_deserializeOpErrorGetConsoleScreenshot(response *smithyhttp.Res } } -type awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary struct { +type awsEc2query_deserializeOpExportClientVpnClientConfiguration struct { } -func (*awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) ID() string { +func (*awsEc2query_deserializeOpExportClientVpnClientConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpExportClientVpnClientConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44900,9 +44701,9 @@ func (m *awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetDeclarativePoliciesReportSummary(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorExportClientVpnClientConfiguration(response, &metadata) } - output := &GetDeclarativePoliciesReportSummaryOutput{} + output := &ExportClientVpnClientConfigurationOutput{} out.Result = output var buff [1024]byte @@ -44923,7 +44724,7 @@ func (m *awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetDeclarativePoliciesReportSummaryOutput(&output, 
decoder) + err = awsEc2query_deserializeOpDocumentExportClientVpnClientConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -44937,7 +44738,7 @@ func (m *awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorGetDeclarativePoliciesReportSummary(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorExportClientVpnClientConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -44970,14 +44771,14 @@ func awsEc2query_deserializeOpErrorGetDeclarativePoliciesReportSummary(response } } -type awsEc2query_deserializeOpGetDefaultCreditSpecification struct { +type awsEc2query_deserializeOpExportImage struct { } -func (*awsEc2query_deserializeOpGetDefaultCreditSpecification) ID() string { +func (*awsEc2query_deserializeOpExportImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetDefaultCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpExportImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -44995,9 +44796,9 @@ func (m *awsEc2query_deserializeOpGetDefaultCreditSpecification) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetDefaultCreditSpecification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorExportImage(response, &metadata) } - output := &GetDefaultCreditSpecificationOutput{} + output := &ExportImageOutput{} out.Result = output var buff [1024]byte @@ -45018,7 +44819,7 @@ func (m *awsEc2query_deserializeOpGetDefaultCreditSpecification) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetDefaultCreditSpecificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentExportImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45032,7 +44833,7 @@ func (m *awsEc2query_deserializeOpGetDefaultCreditSpecification) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorGetDefaultCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorExportImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45065,14 +44866,14 @@ func awsEc2query_deserializeOpErrorGetDefaultCreditSpecification(response *smith } } -type awsEc2query_deserializeOpGetEbsDefaultKmsKeyId struct { +type awsEc2query_deserializeOpExportTransitGatewayRoutes struct { } -func (*awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_deserializeOpExportTransitGatewayRoutes) ID() string { return "OperationDeserializer" } 
-func (m *awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpExportTransitGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45090,9 +44891,9 @@ func (m *awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetEbsDefaultKmsKeyId(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorExportTransitGatewayRoutes(response, &metadata) } - output := &GetEbsDefaultKmsKeyIdOutput{} + output := &ExportTransitGatewayRoutesOutput{} out.Result = output var buff [1024]byte @@ -45113,7 +44914,7 @@ func (m *awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetEbsDefaultKmsKeyIdOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentExportTransitGatewayRoutesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45127,7 +44928,7 @@ func (m *awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorExportTransitGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45160,14 +44961,14 @@ func awsEc2query_deserializeOpErrorGetEbsDefaultKmsKeyId(response *smithyhttp.Re } } -type awsEc2query_deserializeOpGetEbsEncryptionByDefault struct { +type awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration struct { } -func (*awsEc2query_deserializeOpGetEbsEncryptionByDefault) ID() string { +func (*awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpExportVerifiedAccessInstanceClientConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45185,9 +44986,9 @@ func (m *awsEc2query_deserializeOpGetEbsEncryptionByDefault) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetEbsEncryptionByDefault(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorExportVerifiedAccessInstanceClientConfiguration(response, &metadata) } - output := &GetEbsEncryptionByDefaultOutput{} + output := &ExportVerifiedAccessInstanceClientConfigurationOutput{} out.Result = output var buff [1024]byte @@ -45208,7 +45009,7 @@ func (m 
*awsEc2query_deserializeOpGetEbsEncryptionByDefault) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetEbsEncryptionByDefaultOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentExportVerifiedAccessInstanceClientConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45222,7 +45023,7 @@ func (m *awsEc2query_deserializeOpGetEbsEncryptionByDefault) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorExportVerifiedAccessInstanceClientConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45255,14 +45056,14 @@ func awsEc2query_deserializeOpErrorGetEbsEncryptionByDefault(response *smithyhtt } } -type awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate struct { +type awsEc2query_deserializeOpGetActiveVpnTunnelStatus struct { } -func (*awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) ID() string { +func (*awsEc2query_deserializeOpGetActiveVpnTunnelStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetActiveVpnTunnelStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45280,9 +45081,9 @@ func (m *awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetFlowLogsIntegrationTemplate(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetActiveVpnTunnelStatus(response, &metadata) } - output := &GetFlowLogsIntegrationTemplateOutput{} + output := &GetActiveVpnTunnelStatusOutput{} out.Result = output var buff [1024]byte @@ -45303,7 +45104,7 @@ func (m *awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetFlowLogsIntegrationTemplateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetActiveVpnTunnelStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45317,7 +45118,7 @@ func (m *awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorGetFlowLogsIntegrationTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetActiveVpnTunnelStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45350,14 +45151,14 @@ func 
awsEc2query_deserializeOpErrorGetFlowLogsIntegrationTemplate(response *smit } } -type awsEc2query_deserializeOpGetGroupsForCapacityReservation struct { +type awsEc2query_deserializeOpGetAllowedImagesSettings struct { } -func (*awsEc2query_deserializeOpGetGroupsForCapacityReservation) ID() string { +func (*awsEc2query_deserializeOpGetAllowedImagesSettings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetGroupsForCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45375,9 +45176,9 @@ func (m *awsEc2query_deserializeOpGetGroupsForCapacityReservation) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetGroupsForCapacityReservation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetAllowedImagesSettings(response, &metadata) } - output := &GetGroupsForCapacityReservationOutput{} + output := &GetAllowedImagesSettingsOutput{} out.Result = output var buff [1024]byte @@ -45398,7 +45199,7 @@ func (m *awsEc2query_deserializeOpGetGroupsForCapacityReservation) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetGroupsForCapacityReservationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetAllowedImagesSettingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45412,7 +45213,7 @@ func (m *awsEc2query_deserializeOpGetGroupsForCapacityReservation) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorGetGroupsForCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45445,14 +45246,14 @@ func awsEc2query_deserializeOpErrorGetGroupsForCapacityReservation(response *smi } } -type awsEc2query_deserializeOpGetHostReservationPurchasePreview struct { +type awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles struct { } -func (*awsEc2query_deserializeOpGetHostReservationPurchasePreview) ID() string { +func (*awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetHostReservationPurchasePreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetAssociatedEnclaveCertificateIamRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45470,9 +45271,9 @@ func (m *awsEc2query_deserializeOpGetHostReservationPurchasePreview) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - 
return out, metadata, awsEc2query_deserializeOpErrorGetHostReservationPurchasePreview(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetAssociatedEnclaveCertificateIamRoles(response, &metadata) } - output := &GetHostReservationPurchasePreviewOutput{} + output := &GetAssociatedEnclaveCertificateIamRolesOutput{} out.Result = output var buff [1024]byte @@ -45493,7 +45294,7 @@ func (m *awsEc2query_deserializeOpGetHostReservationPurchasePreview) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetHostReservationPurchasePreviewOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetAssociatedEnclaveCertificateIamRolesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45507,7 +45308,7 @@ func (m *awsEc2query_deserializeOpGetHostReservationPurchasePreview) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorGetHostReservationPurchasePreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetAssociatedEnclaveCertificateIamRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45540,14 +45341,14 @@ func awsEc2query_deserializeOpErrorGetHostReservationPurchasePreview(response *s } } -type awsEc2query_deserializeOpGetImageBlockPublicAccessState struct { +type awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs struct { } -func (*awsEc2query_deserializeOpGetImageBlockPublicAccessState) ID() string { +func (*awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetImageBlockPublicAccessState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetAssociatedIpv6PoolCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45565,9 +45366,9 @@ func (m *awsEc2query_deserializeOpGetImageBlockPublicAccessState) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetImageBlockPublicAccessState(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetAssociatedIpv6PoolCidrs(response, &metadata) } - output := &GetImageBlockPublicAccessStateOutput{} + output := &GetAssociatedIpv6PoolCidrsOutput{} out.Result = output var buff [1024]byte @@ -45588,7 +45389,7 @@ func (m *awsEc2query_deserializeOpGetImageBlockPublicAccessState) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetImageBlockPublicAccessStateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetAssociatedIpv6PoolCidrsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45602,7 +45403,7 @@ func (m *awsEc2query_deserializeOpGetImageBlockPublicAccessState) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorGetImageBlockPublicAccessState(response *smithyhttp.Response, metadata *middleware.Metadata) error 
{ +func awsEc2query_deserializeOpErrorGetAssociatedIpv6PoolCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45635,14 +45436,14 @@ func awsEc2query_deserializeOpErrorGetImageBlockPublicAccessState(response *smit } } -type awsEc2query_deserializeOpGetInstanceMetadataDefaults struct { +type awsEc2query_deserializeOpGetAwsNetworkPerformanceData struct { } -func (*awsEc2query_deserializeOpGetInstanceMetadataDefaults) ID() string { +func (*awsEc2query_deserializeOpGetAwsNetworkPerformanceData) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetInstanceMetadataDefaults) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetAwsNetworkPerformanceData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45660,9 +45461,9 @@ func (m *awsEc2query_deserializeOpGetInstanceMetadataDefaults) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetInstanceMetadataDefaults(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetAwsNetworkPerformanceData(response, &metadata) } - output := &GetInstanceMetadataDefaultsOutput{} + output := &GetAwsNetworkPerformanceDataOutput{} out.Result = output var buff [1024]byte @@ -45683,7 +45484,7 @@ func (m *awsEc2query_deserializeOpGetInstanceMetadataDefaults) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetInstanceMetadataDefaultsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetAwsNetworkPerformanceDataOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45697,7 +45498,7 @@ func (m *awsEc2query_deserializeOpGetInstanceMetadataDefaults) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorGetInstanceMetadataDefaults(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetAwsNetworkPerformanceData(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45730,14 +45531,14 @@ func awsEc2query_deserializeOpErrorGetInstanceMetadataDefaults(response *smithyh } } -type awsEc2query_deserializeOpGetInstanceTpmEkPub struct { +type awsEc2query_deserializeOpGetCapacityReservationUsage struct { } -func (*awsEc2query_deserializeOpGetInstanceTpmEkPub) ID() string { +func (*awsEc2query_deserializeOpGetCapacityReservationUsage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetInstanceTpmEkPub) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetCapacityReservationUsage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45755,9 +45556,9 @@ func (m *awsEc2query_deserializeOpGetInstanceTpmEkPub) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetInstanceTpmEkPub(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetCapacityReservationUsage(response, &metadata) } - output := &GetInstanceTpmEkPubOutput{} + output := &GetCapacityReservationUsageOutput{} out.Result = output var buff [1024]byte @@ -45778,7 +45579,7 @@ func (m *awsEc2query_deserializeOpGetInstanceTpmEkPub) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetInstanceTpmEkPubOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetCapacityReservationUsageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45792,7 +45593,7 @@ func (m *awsEc2query_deserializeOpGetInstanceTpmEkPub) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorGetInstanceTpmEkPub(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetCapacityReservationUsage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45825,14 +45626,14 @@ func awsEc2query_deserializeOpErrorGetInstanceTpmEkPub(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements struct { +type awsEc2query_deserializeOpGetCoipPoolUsage struct { } -func (*awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) ID() string { +func (*awsEc2query_deserializeOpGetCoipPoolUsage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetCoipPoolUsage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45850,9 +45651,9 @@ func (m *awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetInstanceTypesFromInstanceRequirements(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetCoipPoolUsage(response, &metadata) } - output := &GetInstanceTypesFromInstanceRequirementsOutput{} + output := &GetCoipPoolUsageOutput{} out.Result = output var buff [1024]byte @@ -45873,7 +45674,7 @@ func (m *awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetInstanceTypesFromInstanceRequirementsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetCoipPoolUsageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45887,7 +45688,7 @@ func (m 
*awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorGetInstanceTypesFromInstanceRequirements(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetCoipPoolUsage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -45920,14 +45721,14 @@ func awsEc2query_deserializeOpErrorGetInstanceTypesFromInstanceRequirements(resp } } -type awsEc2query_deserializeOpGetInstanceUefiData struct { +type awsEc2query_deserializeOpGetConsoleOutput struct { } -func (*awsEc2query_deserializeOpGetInstanceUefiData) ID() string { +func (*awsEc2query_deserializeOpGetConsoleOutput) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetInstanceUefiData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetConsoleOutput) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -45945,9 +45746,9 @@ func (m *awsEc2query_deserializeOpGetInstanceUefiData) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetInstanceUefiData(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetConsoleOutput(response, &metadata) } - output := &GetInstanceUefiDataOutput{} + output := &GetConsoleOutputOutput{} out.Result = output var buff [1024]byte @@ -45968,7 +45769,7 @@ func (m *awsEc2query_deserializeOpGetInstanceUefiData) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetInstanceUefiDataOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetConsoleOutputOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -45982,7 +45783,7 @@ func (m *awsEc2query_deserializeOpGetInstanceUefiData) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorGetInstanceUefiData(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetConsoleOutput(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46015,14 +45816,14 @@ func awsEc2query_deserializeOpErrorGetInstanceUefiData(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpGetIpamAddressHistory struct { +type awsEc2query_deserializeOpGetConsoleScreenshot struct { } -func (*awsEc2query_deserializeOpGetIpamAddressHistory) ID() string { +func (*awsEc2query_deserializeOpGetConsoleScreenshot) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamAddressHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetConsoleScreenshot) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46040,9 +45841,9 @@ func (m *awsEc2query_deserializeOpGetIpamAddressHistory) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamAddressHistory(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetConsoleScreenshot(response, &metadata) } - output := &GetIpamAddressHistoryOutput{} + output := &GetConsoleScreenshotOutput{} out.Result = output var buff [1024]byte @@ -46063,7 +45864,7 @@ func (m *awsEc2query_deserializeOpGetIpamAddressHistory) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamAddressHistoryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetConsoleScreenshotOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46077,7 +45878,7 @@ func (m *awsEc2query_deserializeOpGetIpamAddressHistory) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamAddressHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetConsoleScreenshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46110,14 +45911,14 @@ func awsEc2query_deserializeOpErrorGetIpamAddressHistory(response *smithyhttp.Re } } -type awsEc2query_deserializeOpGetIpamDiscoveredAccounts struct { +type awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary struct { } -func (*awsEc2query_deserializeOpGetIpamDiscoveredAccounts) ID() string { +func (*awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamDiscoveredAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetDeclarativePoliciesReportSummary) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46135,9 +45936,9 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredAccounts) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredAccounts(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetDeclarativePoliciesReportSummary(response, &metadata) } - output := &GetIpamDiscoveredAccountsOutput{} + output := &GetDeclarativePoliciesReportSummaryOutput{} out.Result = output var buff [1024]byte @@ -46158,7 +45959,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredAccounts) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredAccountsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetDeclarativePoliciesReportSummaryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) 
@@ -46172,7 +45973,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredAccounts) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamDiscoveredAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetDeclarativePoliciesReportSummary(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46205,14 +46006,14 @@ func awsEc2query_deserializeOpErrorGetIpamDiscoveredAccounts(response *smithyhtt } } -type awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses struct { +type awsEc2query_deserializeOpGetDefaultCreditSpecification struct { } -func (*awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) ID() string { +func (*awsEc2query_deserializeOpGetDefaultCreditSpecification) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetDefaultCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46230,9 +46031,9 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredPublicAddresses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetDefaultCreditSpecification(response, &metadata) } - output := &GetIpamDiscoveredPublicAddressesOutput{} + output := &GetDefaultCreditSpecificationOutput{} out.Result = output var buff [1024]byte @@ -46253,7 +46054,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredPublicAddressesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetDefaultCreditSpecificationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46267,7 +46068,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamDiscoveredPublicAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetDefaultCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46300,14 +46101,14 @@ func awsEc2query_deserializeOpErrorGetIpamDiscoveredPublicAddresses(response *sm } } -type awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs struct { +type awsEc2query_deserializeOpGetEbsDefaultKmsKeyId struct { } -func (*awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) ID() string { +func (*awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46325,9 +46126,9 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredResourceCidrs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetEbsDefaultKmsKeyId(response, &metadata) } - output := &GetIpamDiscoveredResourceCidrsOutput{} + output := &GetEbsDefaultKmsKeyIdOutput{} out.Result = output var buff [1024]byte @@ -46348,7 +46149,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredResourceCidrsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetEbsDefaultKmsKeyIdOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46362,7 +46163,7 @@ func (m *awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamDiscoveredResourceCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46395,14 +46196,14 @@ func awsEc2query_deserializeOpErrorGetIpamDiscoveredResourceCidrs(response *smit } } -type awsEc2query_deserializeOpGetIpamPoolAllocations struct { +type awsEc2query_deserializeOpGetEbsEncryptionByDefault struct { } -func (*awsEc2query_deserializeOpGetIpamPoolAllocations) ID() string { +func (*awsEc2query_deserializeOpGetEbsEncryptionByDefault) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamPoolAllocations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetEbsEncryptionByDefault) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46420,9 +46221,9 @@ func (m *awsEc2query_deserializeOpGetIpamPoolAllocations) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamPoolAllocations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetEbsEncryptionByDefault(response, &metadata) } - output := &GetIpamPoolAllocationsOutput{} + output := &GetEbsEncryptionByDefaultOutput{} out.Result = output var buff [1024]byte @@ -46443,7 +46244,7 @@ func (m *awsEc2query_deserializeOpGetIpamPoolAllocations) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentGetIpamPoolAllocationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetEbsEncryptionByDefaultOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46457,7 +46258,7 @@ func (m *awsEc2query_deserializeOpGetIpamPoolAllocations) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamPoolAllocations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetEbsEncryptionByDefault(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46490,14 +46291,14 @@ func awsEc2query_deserializeOpErrorGetIpamPoolAllocations(response *smithyhttp.R } } -type awsEc2query_deserializeOpGetIpamPoolCidrs struct { +type awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate struct { } -func (*awsEc2query_deserializeOpGetIpamPoolCidrs) ID() string { +func (*awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamPoolCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetFlowLogsIntegrationTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46515,9 +46316,9 @@ func (m *awsEc2query_deserializeOpGetIpamPoolCidrs) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamPoolCidrs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetFlowLogsIntegrationTemplate(response, &metadata) } - output := &GetIpamPoolCidrsOutput{} + output := &GetFlowLogsIntegrationTemplateOutput{} out.Result = output var buff [1024]byte @@ -46538,7 +46339,7 @@ func (m *awsEc2query_deserializeOpGetIpamPoolCidrs) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamPoolCidrsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetFlowLogsIntegrationTemplateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46552,7 +46353,7 @@ func (m *awsEc2query_deserializeOpGetIpamPoolCidrs) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamPoolCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetFlowLogsIntegrationTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46585,14 +46386,14 @@ func awsEc2query_deserializeOpErrorGetIpamPoolCidrs(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpGetIpamResourceCidrs struct { +type awsEc2query_deserializeOpGetGroupsForCapacityReservation struct { } -func (*awsEc2query_deserializeOpGetIpamResourceCidrs) ID() string { +func 
(*awsEc2query_deserializeOpGetGroupsForCapacityReservation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetIpamResourceCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetGroupsForCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46610,9 +46411,9 @@ func (m *awsEc2query_deserializeOpGetIpamResourceCidrs) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetIpamResourceCidrs(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetGroupsForCapacityReservation(response, &metadata) } - output := &GetIpamResourceCidrsOutput{} + output := &GetGroupsForCapacityReservationOutput{} out.Result = output var buff [1024]byte @@ -46633,7 +46434,7 @@ func (m *awsEc2query_deserializeOpGetIpamResourceCidrs) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetIpamResourceCidrsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetGroupsForCapacityReservationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46647,7 +46448,7 @@ func (m *awsEc2query_deserializeOpGetIpamResourceCidrs) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorGetIpamResourceCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetGroupsForCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46680,14 +46481,14 @@ func awsEc2query_deserializeOpErrorGetIpamResourceCidrs(response *smithyhttp.Res } } -type awsEc2query_deserializeOpGetLaunchTemplateData struct { +type awsEc2query_deserializeOpGetHostReservationPurchasePreview struct { } -func (*awsEc2query_deserializeOpGetLaunchTemplateData) ID() string { +func (*awsEc2query_deserializeOpGetHostReservationPurchasePreview) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetLaunchTemplateData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetHostReservationPurchasePreview) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46705,9 +46506,9 @@ func (m *awsEc2query_deserializeOpGetLaunchTemplateData) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetLaunchTemplateData(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetHostReservationPurchasePreview(response, &metadata) } - output := &GetLaunchTemplateDataOutput{} + output := &GetHostReservationPurchasePreviewOutput{} out.Result = output var buff [1024]byte @@ -46728,7 +46529,7 @@ func (m 
*awsEc2query_deserializeOpGetLaunchTemplateData) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetLaunchTemplateDataOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetHostReservationPurchasePreviewOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46742,7 +46543,7 @@ func (m *awsEc2query_deserializeOpGetLaunchTemplateData) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetLaunchTemplateData(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetHostReservationPurchasePreview(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46775,14 +46576,14 @@ func awsEc2query_deserializeOpErrorGetLaunchTemplateData(response *smithyhttp.Re } } -type awsEc2query_deserializeOpGetManagedPrefixListAssociations struct { +type awsEc2query_deserializeOpGetImageBlockPublicAccessState struct { } -func (*awsEc2query_deserializeOpGetManagedPrefixListAssociations) ID() string { +func (*awsEc2query_deserializeOpGetImageBlockPublicAccessState) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetManagedPrefixListAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetImageBlockPublicAccessState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46800,9 +46601,9 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListAssociations) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetManagedPrefixListAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetImageBlockPublicAccessState(response, &metadata) } - output := &GetManagedPrefixListAssociationsOutput{} + output := &GetImageBlockPublicAccessStateOutput{} out.Result = output var buff [1024]byte @@ -46823,7 +46624,7 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListAssociations) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetManagedPrefixListAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetImageBlockPublicAccessStateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46837,7 +46638,7 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListAssociations) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorGetManagedPrefixListAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetImageBlockPublicAccessState(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46870,14 +46671,14 @@ func 
awsEc2query_deserializeOpErrorGetManagedPrefixListAssociations(response *sm } } -type awsEc2query_deserializeOpGetManagedPrefixListEntries struct { +type awsEc2query_deserializeOpGetInstanceMetadataDefaults struct { } -func (*awsEc2query_deserializeOpGetManagedPrefixListEntries) ID() string { +func (*awsEc2query_deserializeOpGetInstanceMetadataDefaults) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetManagedPrefixListEntries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetInstanceMetadataDefaults) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46895,9 +46696,9 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListEntries) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetManagedPrefixListEntries(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetInstanceMetadataDefaults(response, &metadata) } - output := &GetManagedPrefixListEntriesOutput{} + output := &GetInstanceMetadataDefaultsOutput{} out.Result = output var buff [1024]byte @@ -46918,7 +46719,7 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListEntries) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetManagedPrefixListEntriesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetInstanceMetadataDefaultsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -46932,7 +46733,7 @@ func (m *awsEc2query_deserializeOpGetManagedPrefixListEntries) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorGetManagedPrefixListEntries(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetInstanceMetadataDefaults(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -46965,14 +46766,14 @@ func awsEc2query_deserializeOpErrorGetManagedPrefixListEntries(response *smithyh } } -type awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings struct { +type awsEc2query_deserializeOpGetInstanceTpmEkPub struct { } -func (*awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) ID() string { +func (*awsEc2query_deserializeOpGetInstanceTpmEkPub) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetInstanceTpmEkPub) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -46990,9 +46791,9 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeAnalysisFindings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetInstanceTpmEkPub(response, &metadata) } - output := &GetNetworkInsightsAccessScopeAnalysisFindingsOutput{} + output := &GetInstanceTpmEkPubOutput{} out.Result = output var buff [1024]byte @@ -47013,7 +46814,7 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetNetworkInsightsAccessScopeAnalysisFindingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetInstanceTpmEkPubOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47027,7 +46828,7 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) return out, metadata, err } -func awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeAnalysisFindings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetInstanceTpmEkPub(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47060,14 +46861,14 @@ func awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeAnalysisFindings } } -type awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent struct { +type awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements struct { } -func (*awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) ID() string { +func (*awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetInstanceTypesFromInstanceRequirements) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47085,9 +46886,9 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeContent(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetInstanceTypesFromInstanceRequirements(response, &metadata) } - output := &GetNetworkInsightsAccessScopeContentOutput{} + output := &GetInstanceTypesFromInstanceRequirementsOutput{} out.Result = output var buff [1024]byte @@ -47108,7 +46909,7 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetNetworkInsightsAccessScopeContentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetInstanceTypesFromInstanceRequirementsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47122,7 +46923,7 @@ func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) HandleDe return out, metadata, err } -func 
awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeContent(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetInstanceTypesFromInstanceRequirements(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47155,14 +46956,14 @@ func awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeContent(response } } -type awsEc2query_deserializeOpGetPasswordData struct { +type awsEc2query_deserializeOpGetInstanceUefiData struct { } -func (*awsEc2query_deserializeOpGetPasswordData) ID() string { +func (*awsEc2query_deserializeOpGetInstanceUefiData) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetPasswordData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetInstanceUefiData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47180,9 +46981,9 @@ func (m *awsEc2query_deserializeOpGetPasswordData) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetPasswordData(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetInstanceUefiData(response, &metadata) } - output := &GetPasswordDataOutput{} + output := &GetInstanceUefiDataOutput{} out.Result = output var buff [1024]byte @@ -47203,7 +47004,7 @@ func (m *awsEc2query_deserializeOpGetPasswordData) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetPasswordDataOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetInstanceUefiDataOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47217,7 +47018,7 @@ func (m *awsEc2query_deserializeOpGetPasswordData) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorGetPasswordData(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetInstanceUefiData(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47250,14 +47051,14 @@ func awsEc2query_deserializeOpErrorGetPasswordData(response *smithyhttp.Response } } -type awsEc2query_deserializeOpGetReservedInstancesExchangeQuote struct { +type awsEc2query_deserializeOpGetIpamAddressHistory struct { } -func (*awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) ID() string { +func (*awsEc2query_deserializeOpGetIpamAddressHistory) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamAddressHistory) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) 
( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47275,9 +47076,9 @@ func (m *awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetReservedInstancesExchangeQuote(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamAddressHistory(response, &metadata) } - output := &GetReservedInstancesExchangeQuoteOutput{} + output := &GetIpamAddressHistoryOutput{} out.Result = output var buff [1024]byte @@ -47298,7 +47099,7 @@ func (m *awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetReservedInstancesExchangeQuoteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamAddressHistoryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47312,7 +47113,7 @@ func (m *awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorGetReservedInstancesExchangeQuote(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamAddressHistory(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47345,14 +47146,14 @@ func awsEc2query_deserializeOpErrorGetReservedInstancesExchangeQuote(response *s } } -type awsEc2query_deserializeOpGetRouteServerAssociations struct { +type awsEc2query_deserializeOpGetIpamDiscoveredAccounts struct { } -func (*awsEc2query_deserializeOpGetRouteServerAssociations) ID() string { +func (*awsEc2query_deserializeOpGetIpamDiscoveredAccounts) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetRouteServerAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamDiscoveredAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47370,9 +47171,9 @@ func (m *awsEc2query_deserializeOpGetRouteServerAssociations) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredAccounts(response, &metadata) } - output := &GetRouteServerAssociationsOutput{} + output := &GetIpamDiscoveredAccountsOutput{} out.Result = output var buff [1024]byte @@ -47393,7 +47194,7 @@ func (m *awsEc2query_deserializeOpGetRouteServerAssociations) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetRouteServerAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredAccountsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47407,7 +47208,7 @@ func (m 
*awsEc2query_deserializeOpGetRouteServerAssociations) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorGetRouteServerAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamDiscoveredAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47440,14 +47241,14 @@ func awsEc2query_deserializeOpErrorGetRouteServerAssociations(response *smithyht } } -type awsEc2query_deserializeOpGetRouteServerPropagations struct { +type awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses struct { } -func (*awsEc2query_deserializeOpGetRouteServerPropagations) ID() string { +func (*awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetRouteServerPropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamDiscoveredPublicAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47465,9 +47266,9 @@ func (m *awsEc2query_deserializeOpGetRouteServerPropagations) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerPropagations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredPublicAddresses(response, &metadata) } - output := &GetRouteServerPropagationsOutput{} + output := &GetIpamDiscoveredPublicAddressesOutput{} out.Result = output var buff [1024]byte @@ -47488,7 +47289,7 @@ func (m *awsEc2query_deserializeOpGetRouteServerPropagations) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetRouteServerPropagationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredPublicAddressesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47502,7 +47303,7 @@ func (m *awsEc2query_deserializeOpGetRouteServerPropagations) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorGetRouteServerPropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamDiscoveredPublicAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47535,14 +47336,14 @@ func awsEc2query_deserializeOpErrorGetRouteServerPropagations(response *smithyht } } -type awsEc2query_deserializeOpGetRouteServerRoutingDatabase struct { +type awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs struct { } -func (*awsEc2query_deserializeOpGetRouteServerRoutingDatabase) ID() string { +func (*awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetRouteServerRoutingDatabase) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamDiscoveredResourceCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47560,9 +47361,9 @@ func (m *awsEc2query_deserializeOpGetRouteServerRoutingDatabase) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerRoutingDatabase(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamDiscoveredResourceCidrs(response, &metadata) } - output := &GetRouteServerRoutingDatabaseOutput{} + output := &GetIpamDiscoveredResourceCidrsOutput{} out.Result = output var buff [1024]byte @@ -47583,7 +47384,7 @@ func (m *awsEc2query_deserializeOpGetRouteServerRoutingDatabase) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetRouteServerRoutingDatabaseOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamDiscoveredResourceCidrsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47597,7 +47398,7 @@ func (m *awsEc2query_deserializeOpGetRouteServerRoutingDatabase) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorGetRouteServerRoutingDatabase(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamDiscoveredResourceCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47630,14 +47431,14 @@ func awsEc2query_deserializeOpErrorGetRouteServerRoutingDatabase(response *smith } } -type awsEc2query_deserializeOpGetSecurityGroupsForVpc struct { +type awsEc2query_deserializeOpGetIpamPoolAllocations struct { } -func (*awsEc2query_deserializeOpGetSecurityGroupsForVpc) ID() string { +func (*awsEc2query_deserializeOpGetIpamPoolAllocations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetSecurityGroupsForVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamPoolAllocations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47655,9 +47456,9 @@ func (m *awsEc2query_deserializeOpGetSecurityGroupsForVpc) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetSecurityGroupsForVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamPoolAllocations(response, &metadata) } - output := &GetSecurityGroupsForVpcOutput{} + output := &GetIpamPoolAllocationsOutput{} out.Result = output var buff [1024]byte @@ -47678,7 +47479,7 @@ func (m *awsEc2query_deserializeOpGetSecurityGroupsForVpc) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentGetSecurityGroupsForVpcOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamPoolAllocationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47692,7 +47493,7 @@ func (m *awsEc2query_deserializeOpGetSecurityGroupsForVpc) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorGetSecurityGroupsForVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamPoolAllocations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47725,14 +47526,14 @@ func awsEc2query_deserializeOpErrorGetSecurityGroupsForVpc(response *smithyhttp. } } -type awsEc2query_deserializeOpGetSerialConsoleAccessStatus struct { +type awsEc2query_deserializeOpGetIpamPoolCidrs struct { } -func (*awsEc2query_deserializeOpGetSerialConsoleAccessStatus) ID() string { +func (*awsEc2query_deserializeOpGetIpamPoolCidrs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetSerialConsoleAccessStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamPoolCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47750,9 +47551,9 @@ func (m *awsEc2query_deserializeOpGetSerialConsoleAccessStatus) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetSerialConsoleAccessStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamPoolCidrs(response, &metadata) } - output := &GetSerialConsoleAccessStatusOutput{} + output := &GetIpamPoolCidrsOutput{} out.Result = output var buff [1024]byte @@ -47773,7 +47574,7 @@ func (m *awsEc2query_deserializeOpGetSerialConsoleAccessStatus) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetSerialConsoleAccessStatusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamPoolCidrsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47787,7 +47588,7 @@ func (m *awsEc2query_deserializeOpGetSerialConsoleAccessStatus) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorGetSerialConsoleAccessStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamPoolCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47820,14 +47621,14 @@ func awsEc2query_deserializeOpErrorGetSerialConsoleAccessStatus(response *smithy } } -type awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState struct { +type awsEc2query_deserializeOpGetIpamResourceCidrs struct { } -func (*awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) ID() string { +func 
(*awsEc2query_deserializeOpGetIpamResourceCidrs) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetIpamResourceCidrs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47845,9 +47646,9 @@ func (m *awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetSnapshotBlockPublicAccessState(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetIpamResourceCidrs(response, &metadata) } - output := &GetSnapshotBlockPublicAccessStateOutput{} + output := &GetIpamResourceCidrsOutput{} out.Result = output var buff [1024]byte @@ -47868,7 +47669,7 @@ func (m *awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetSnapshotBlockPublicAccessStateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetIpamResourceCidrsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47882,7 +47683,7 @@ func (m *awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorGetSnapshotBlockPublicAccessState(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetIpamResourceCidrs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -47915,14 +47716,14 @@ func awsEc2query_deserializeOpErrorGetSnapshotBlockPublicAccessState(response *s } } -type awsEc2query_deserializeOpGetSpotPlacementScores struct { +type awsEc2query_deserializeOpGetLaunchTemplateData struct { } -func (*awsEc2query_deserializeOpGetSpotPlacementScores) ID() string { +func (*awsEc2query_deserializeOpGetLaunchTemplateData) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetSpotPlacementScores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetLaunchTemplateData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -47940,9 +47741,9 @@ func (m *awsEc2query_deserializeOpGetSpotPlacementScores) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetSpotPlacementScores(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetLaunchTemplateData(response, &metadata) } - output := &GetSpotPlacementScoresOutput{} + output := &GetLaunchTemplateDataOutput{} out.Result = output var buff [1024]byte @@ -47963,7 +47764,7 @@ func (m *awsEc2query_deserializeOpGetSpotPlacementScores) 
HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetSpotPlacementScoresOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetLaunchTemplateDataOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -47977,7 +47778,7 @@ func (m *awsEc2query_deserializeOpGetSpotPlacementScores) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorGetSpotPlacementScores(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetLaunchTemplateData(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48010,14 +47811,14 @@ func awsEc2query_deserializeOpErrorGetSpotPlacementScores(response *smithyhttp.R } } -type awsEc2query_deserializeOpGetSubnetCidrReservations struct { +type awsEc2query_deserializeOpGetManagedPrefixListAssociations struct { } -func (*awsEc2query_deserializeOpGetSubnetCidrReservations) ID() string { +func (*awsEc2query_deserializeOpGetManagedPrefixListAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetSubnetCidrReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetManagedPrefixListAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48035,9 +47836,9 @@ func (m *awsEc2query_deserializeOpGetSubnetCidrReservations) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetSubnetCidrReservations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetManagedPrefixListAssociations(response, &metadata) } - output := &GetSubnetCidrReservationsOutput{} + output := &GetManagedPrefixListAssociationsOutput{} out.Result = output var buff [1024]byte @@ -48058,7 +47859,7 @@ func (m *awsEc2query_deserializeOpGetSubnetCidrReservations) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetSubnetCidrReservationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetManagedPrefixListAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48072,7 +47873,7 @@ func (m *awsEc2query_deserializeOpGetSubnetCidrReservations) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorGetSubnetCidrReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetManagedPrefixListAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48105,14 +47906,14 @@ func awsEc2query_deserializeOpErrorGetSubnetCidrReservations(response *smithyhtt } } -type awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations 
struct { +type awsEc2query_deserializeOpGetManagedPrefixListEntries struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) ID() string { +func (*awsEc2query_deserializeOpGetManagedPrefixListEntries) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetManagedPrefixListEntries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48130,9 +47931,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayAttachmentPropagations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetManagedPrefixListEntries(response, &metadata) } - output := &GetTransitGatewayAttachmentPropagationsOutput{} + output := &GetManagedPrefixListEntriesOutput{} out.Result = output var buff [1024]byte @@ -48153,7 +47954,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayAttachmentPropagationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetManagedPrefixListEntriesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48167,7 +47968,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayAttachmentPropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetManagedPrefixListEntries(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48200,14 +48001,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayAttachmentPropagations(respo } } -type awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations struct { +type awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) ID() string { +func (*awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeAnalysisFindings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48225,9 +48026,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorGetTransitGatewayMulticastDomainAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeAnalysisFindings(response, &metadata) } - output := &GetTransitGatewayMulticastDomainAssociationsOutput{} + output := &GetNetworkInsightsAccessScopeAnalysisFindingsOutput{} out.Result = output var buff [1024]byte @@ -48248,7 +48049,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayMulticastDomainAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetNetworkInsightsAccessScopeAnalysisFindingsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48262,7 +48063,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayMulticastDomainAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeAnalysisFindings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48295,14 +48096,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayMulticastDomainAssociations( } } -type awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations struct { +type awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) ID() string { +func (*awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetNetworkInsightsAccessScopeContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48320,9 +48121,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeContent(response, &metadata) } - output := &GetTransitGatewayPolicyTableAssociationsOutput{} + output := &GetNetworkInsightsAccessScopeContentOutput{} out.Result = output var buff [1024]byte @@ -48343,7 +48144,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayPolicyTableAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetNetworkInsightsAccessScopeContentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48357,7 +48158,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) Hand 
return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetNetworkInsightsAccessScopeContent(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48390,14 +48191,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableAssociations(resp } } -type awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries struct { +type awsEc2query_deserializeOpGetPasswordData struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) ID() string { +func (*awsEc2query_deserializeOpGetPasswordData) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetPasswordData) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48415,9 +48216,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableEntries(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetPasswordData(response, &metadata) } - output := &GetTransitGatewayPolicyTableEntriesOutput{} + output := &GetPasswordDataOutput{} out.Result = output var buff [1024]byte @@ -48438,7 +48239,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayPolicyTableEntriesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetPasswordDataOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48452,7 +48253,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableEntries(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetPasswordData(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48485,14 +48286,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableEntries(response } } -type awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences struct { +type awsEc2query_deserializeOpGetReservedInstancesExchangeQuote struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) ID() string { +func (*awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetReservedInstancesExchangeQuote) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48510,9 +48311,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPrefixListReferences(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetReservedInstancesExchangeQuote(response, &metadata) } - output := &GetTransitGatewayPrefixListReferencesOutput{} + output := &GetReservedInstancesExchangeQuoteOutput{} out.Result = output var buff [1024]byte @@ -48533,7 +48334,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayPrefixListReferencesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetReservedInstancesExchangeQuoteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48547,7 +48348,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayPrefixListReferences(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetReservedInstancesExchangeQuote(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48580,14 +48381,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayPrefixListReferences(respons } } -type awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations struct { +type awsEc2query_deserializeOpGetRouteServerAssociations struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) ID() string { +func (*awsEc2query_deserializeOpGetRouteServerAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetRouteServerAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48605,9 +48406,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayRouteTableAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerAssociations(response, &metadata) } - output := &GetTransitGatewayRouteTableAssociationsOutput{} + output := &GetRouteServerAssociationsOutput{} out.Result = output var buff [1024]byte @@ -48628,7 +48429,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) Handl } decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayRouteTableAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetRouteServerAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48642,7 +48443,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTableAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetRouteServerAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48675,14 +48476,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTableAssociations(respo } } -type awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations struct { +type awsEc2query_deserializeOpGetRouteServerPropagations struct { } -func (*awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) ID() string { +func (*awsEc2query_deserializeOpGetRouteServerPropagations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetRouteServerPropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48700,9 +48501,9 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayRouteTablePropagations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerPropagations(response, &metadata) } - output := &GetTransitGatewayRouteTablePropagationsOutput{} + output := &GetRouteServerPropagationsOutput{} out.Result = output var buff [1024]byte @@ -48723,7 +48524,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetTransitGatewayRouteTablePropagationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetRouteServerPropagationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48737,7 +48538,7 @@ func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTablePropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetRouteServerPropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48770,14 +48571,14 @@ func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTablePropagations(respo } } -type 
awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy struct { +type awsEc2query_deserializeOpGetRouteServerRoutingDatabase struct { } -func (*awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) ID() string { +func (*awsEc2query_deserializeOpGetRouteServerRoutingDatabase) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetRouteServerRoutingDatabase) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48795,9 +48596,9 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointPolicy(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetRouteServerRoutingDatabase(response, &metadata) } - output := &GetVerifiedAccessEndpointPolicyOutput{} + output := &GetRouteServerRoutingDatabaseOutput{} out.Result = output var buff [1024]byte @@ -48818,7 +48619,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVerifiedAccessEndpointPolicyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetRouteServerRoutingDatabaseOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48832,7 +48633,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetRouteServerRoutingDatabase(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48865,14 +48666,14 @@ func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointPolicy(response *smi } } -type awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets struct { +type awsEc2query_deserializeOpGetSecurityGroupsForVpc struct { } -func (*awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) ID() string { +func (*awsEc2query_deserializeOpGetSecurityGroupsForVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetSecurityGroupsForVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48890,9 +48691,9 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointTargets(response, &metadata) 
+ return out, metadata, awsEc2query_deserializeOpErrorGetSecurityGroupsForVpc(response, &metadata) } - output := &GetVerifiedAccessEndpointTargetsOutput{} + output := &GetSecurityGroupsForVpcOutput{} out.Result = output var buff [1024]byte @@ -48913,7 +48714,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVerifiedAccessEndpointTargetsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetSecurityGroupsForVpcOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -48927,7 +48728,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetSecurityGroupsForVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -48960,14 +48761,14 @@ func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointTargets(response *sm } } -type awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy struct { +type awsEc2query_deserializeOpGetSerialConsoleAccessStatus struct { } -func (*awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) ID() string { +func (*awsEc2query_deserializeOpGetSerialConsoleAccessStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetSerialConsoleAccessStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -48985,9 +48786,9 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessGroupPolicy(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetSerialConsoleAccessStatus(response, &metadata) } - output := &GetVerifiedAccessGroupPolicyOutput{} + output := &GetSerialConsoleAccessStatusOutput{} out.Result = output var buff [1024]byte @@ -49008,7 +48809,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVerifiedAccessGroupPolicyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetSerialConsoleAccessStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49022,7 +48823,7 @@ func (m *awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorGetVerifiedAccessGroupPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetSerialConsoleAccessStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err 
:= io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49055,14 +48856,14 @@ func awsEc2query_deserializeOpErrorGetVerifiedAccessGroupPolicy(response *smithy } } -type awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration struct { +type awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState struct { } -func (*awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) ID() string { +func (*awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetSnapshotBlockPublicAccessState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49080,9 +48881,9 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVpnConnectionDeviceSampleConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetSnapshotBlockPublicAccessState(response, &metadata) } - output := &GetVpnConnectionDeviceSampleConfigurationOutput{} + output := &GetSnapshotBlockPublicAccessStateOutput{} out.Result = output var buff [1024]byte @@ -49103,7 +48904,7 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) Han } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVpnConnectionDeviceSampleConfigurationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetSnapshotBlockPublicAccessStateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49117,7 +48918,7 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) Han return out, metadata, err } -func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceSampleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetSnapshotBlockPublicAccessState(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49150,14 +48951,14 @@ func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceSampleConfiguration(res } } -type awsEc2query_deserializeOpGetVpnConnectionDeviceTypes struct { +type awsEc2query_deserializeOpGetSpotPlacementScores struct { } -func (*awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) ID() string { +func (*awsEc2query_deserializeOpGetSpotPlacementScores) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetSpotPlacementScores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49175,9 +48976,9 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVpnConnectionDeviceTypes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetSpotPlacementScores(response, &metadata) } - output := &GetVpnConnectionDeviceTypesOutput{} + output := &GetSpotPlacementScoresOutput{} out.Result = output var buff [1024]byte @@ -49198,7 +48999,7 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVpnConnectionDeviceTypesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetSpotPlacementScoresOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49212,7 +49013,7 @@ func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetSpotPlacementScores(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49245,14 +49046,14 @@ func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceTypes(response *smithyh } } -type awsEc2query_deserializeOpGetVpnTunnelReplacementStatus struct { +type awsEc2query_deserializeOpGetSubnetCidrReservations struct { } -func (*awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) ID() string { +func (*awsEc2query_deserializeOpGetSubnetCidrReservations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetSubnetCidrReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49270,9 +49071,9 @@ func (m *awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorGetVpnTunnelReplacementStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetSubnetCidrReservations(response, &metadata) } - output := &GetVpnTunnelReplacementStatusOutput{} + output := &GetSubnetCidrReservationsOutput{} out.Result = output var buff [1024]byte @@ -49293,7 +49094,7 @@ func (m *awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentGetVpnTunnelReplacementStatusOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetSubnetCidrReservationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49307,7 +49108,7 @@ func (m *awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) HandleDeseriali return 
out, metadata, err } -func awsEc2query_deserializeOpErrorGetVpnTunnelReplacementStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetSubnetCidrReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49340,14 +49141,14 @@ func awsEc2query_deserializeOpErrorGetVpnTunnelReplacementStatus(response *smith } } -type awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList struct { +type awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations struct { } -func (*awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayAttachmentPropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49365,9 +49166,9 @@ func (m *awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportClientVpnClientCertificateRevocationList(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayAttachmentPropagations(response, &metadata) } - output := &ImportClientVpnClientCertificateRevocationListOutput{} + output := &GetTransitGatewayAttachmentPropagationsOutput{} out.Result = output var buff [1024]byte @@ -49388,7 +49189,7 @@ func (m *awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportClientVpnClientCertificateRevocationListOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayAttachmentPropagationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49402,7 +49203,7 @@ func (m *awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList return out, metadata, err } -func awsEc2query_deserializeOpErrorImportClientVpnClientCertificateRevocationList(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayAttachmentPropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49435,14 +49236,14 @@ func awsEc2query_deserializeOpErrorImportClientVpnClientCertificateRevocationLis } } -type awsEc2query_deserializeOpImportImage struct { +type awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations struct { } -func (*awsEc2query_deserializeOpImportImage) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) ID() string { return 
"OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayMulticastDomainAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49460,9 +49261,9 @@ func (m *awsEc2query_deserializeOpImportImage) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayMulticastDomainAssociations(response, &metadata) } - output := &ImportImageOutput{} + output := &GetTransitGatewayMulticastDomainAssociationsOutput{} out.Result = output var buff [1024]byte @@ -49483,7 +49284,7 @@ func (m *awsEc2query_deserializeOpImportImage) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayMulticastDomainAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49497,7 +49298,7 @@ func (m *awsEc2query_deserializeOpImportImage) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorImportImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayMulticastDomainAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49530,14 +49331,14 @@ func awsEc2query_deserializeOpErrorImportImage(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpImportInstance struct { +type awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations struct { } -func (*awsEc2query_deserializeOpImportInstance) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49555,9 +49356,9 @@ func (m *awsEc2query_deserializeOpImportInstance) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportInstance(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableAssociations(response, &metadata) } - output := &ImportInstanceOutput{} + output := &GetTransitGatewayPolicyTableAssociationsOutput{} out.Result = output var buff [1024]byte @@ -49578,7 +49379,7 @@ func (m *awsEc2query_deserializeOpImportInstance) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportInstanceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayPolicyTableAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49592,7 +49393,7 @@ func (m *awsEc2query_deserializeOpImportInstance) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorImportInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49625,14 +49426,14 @@ func awsEc2query_deserializeOpErrorImportInstance(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpImportKeyPair struct { +type awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries struct { } -func (*awsEc2query_deserializeOpImportKeyPair) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayPolicyTableEntries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49650,9 +49451,9 @@ func (m *awsEc2query_deserializeOpImportKeyPair) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportKeyPair(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableEntries(response, &metadata) } - output := &ImportKeyPairOutput{} + output := &GetTransitGatewayPolicyTableEntriesOutput{} out.Result = output var buff [1024]byte @@ -49673,7 +49474,7 @@ func (m *awsEc2query_deserializeOpImportKeyPair) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportKeyPairOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayPolicyTableEntriesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49687,7 +49488,7 @@ func (m *awsEc2query_deserializeOpImportKeyPair) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorImportKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorGetTransitGatewayPolicyTableEntries(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49720,14 +49521,14 @@ func awsEc2query_deserializeOpErrorImportKeyPair(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpImportSnapshot struct { +type awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences struct { } -func (*awsEc2query_deserializeOpImportSnapshot) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayPrefixListReferences) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49745,9 +49546,9 @@ func (m *awsEc2query_deserializeOpImportSnapshot) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportSnapshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayPrefixListReferences(response, &metadata) } - output := &ImportSnapshotOutput{} + output := &GetTransitGatewayPrefixListReferencesOutput{} out.Result = output var buff [1024]byte @@ -49768,7 +49569,7 @@ func (m *awsEc2query_deserializeOpImportSnapshot) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportSnapshotOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayPrefixListReferencesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49782,7 +49583,7 @@ func (m *awsEc2query_deserializeOpImportSnapshot) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsEc2query_deserializeOpErrorImportSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayPrefixListReferences(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49815,14 +49616,14 @@ func awsEc2query_deserializeOpErrorImportSnapshot(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpImportVolume struct { +type awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations struct { } -func (*awsEc2query_deserializeOpImportVolume) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpImportVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetTransitGatewayRouteTableAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49840,9 +49641,9 @@ func (m *awsEc2query_deserializeOpImportVolume) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorImportVolume(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayRouteTableAssociations(response, &metadata) } - output := &ImportVolumeOutput{} + output := &GetTransitGatewayRouteTableAssociationsOutput{} out.Result = output var buff [1024]byte @@ -49863,7 +49664,7 @@ func (m *awsEc2query_deserializeOpImportVolume) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentImportVolumeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayRouteTableAssociationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49877,7 +49678,7 @@ func (m *awsEc2query_deserializeOpImportVolume) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorImportVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTableAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -49910,14 +49711,14 @@ func awsEc2query_deserializeOpErrorImportVolume(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpListImagesInRecycleBin struct { +type awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations struct { } -func (*awsEc2query_deserializeOpListImagesInRecycleBin) ID() string { +func (*awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpListImagesInRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsEc2query_deserializeOpGetTransitGatewayRouteTablePropagations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -49935,9 +49736,9 @@ func (m *awsEc2query_deserializeOpListImagesInRecycleBin) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorListImagesInRecycleBin(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetTransitGatewayRouteTablePropagations(response, &metadata) } - output := &ListImagesInRecycleBinOutput{} + output := &GetTransitGatewayRouteTablePropagationsOutput{} out.Result = output var buff [1024]byte @@ -49958,7 +49759,7 @@ func (m *awsEc2query_deserializeOpListImagesInRecycleBin) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentListImagesInRecycleBinOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetTransitGatewayRouteTablePropagationsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -49972,7 +49773,7 @@ func (m *awsEc2query_deserializeOpListImagesInRecycleBin) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorListImagesInRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetTransitGatewayRouteTablePropagations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50005,14 +49806,14 @@ func awsEc2query_deserializeOpErrorListImagesInRecycleBin(response *smithyhttp.R } } -type awsEc2query_deserializeOpListSnapshotsInRecycleBin struct { +type awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy struct { } -func (*awsEc2query_deserializeOpListSnapshotsInRecycleBin) ID() string { +func (*awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpListSnapshotsInRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50030,9 +49831,9 @@ func (m *awsEc2query_deserializeOpListSnapshotsInRecycleBin) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorListSnapshotsInRecycleBin(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointPolicy(response, &metadata) } - output := &ListSnapshotsInRecycleBinOutput{} + output := &GetVerifiedAccessEndpointPolicyOutput{} out.Result = output var buff [1024]byte @@ -50053,7 +49854,7 @@ func (m *awsEc2query_deserializeOpListSnapshotsInRecycleBin) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentListSnapshotsInRecycleBinOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentGetVerifiedAccessEndpointPolicyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50067,7 +49868,7 @@ func (m *awsEc2query_deserializeOpListSnapshotsInRecycleBin) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorListSnapshotsInRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50100,14 +49901,14 @@ func awsEc2query_deserializeOpErrorListSnapshotsInRecycleBin(response *smithyhtt } } -type awsEc2query_deserializeOpLockSnapshot struct { +type awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets struct { } -func (*awsEc2query_deserializeOpLockSnapshot) ID() string { +func (*awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpLockSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVerifiedAccessEndpointTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50125,9 +49926,9 @@ func (m *awsEc2query_deserializeOpLockSnapshot) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorLockSnapshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointTargets(response, &metadata) } - output := &LockSnapshotOutput{} + output := &GetVerifiedAccessEndpointTargetsOutput{} out.Result = output var buff [1024]byte @@ -50148,7 +49949,7 @@ func (m *awsEc2query_deserializeOpLockSnapshot) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentLockSnapshotOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetVerifiedAccessEndpointTargetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50162,7 +49963,7 @@ func (m *awsEc2query_deserializeOpLockSnapshot) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorLockSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVerifiedAccessEndpointTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50195,14 +49996,14 @@ func awsEc2query_deserializeOpErrorLockSnapshot(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpModifyAddressAttribute struct { +type awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy struct { } -func (*awsEc2query_deserializeOpModifyAddressAttribute) ID() string { +func (*awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) ID() string { return "OperationDeserializer" } 
-func (m *awsEc2query_deserializeOpModifyAddressAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVerifiedAccessGroupPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50220,9 +50021,9 @@ func (m *awsEc2query_deserializeOpModifyAddressAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyAddressAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVerifiedAccessGroupPolicy(response, &metadata) } - output := &ModifyAddressAttributeOutput{} + output := &GetVerifiedAccessGroupPolicyOutput{} out.Result = output var buff [1024]byte @@ -50243,7 +50044,7 @@ func (m *awsEc2query_deserializeOpModifyAddressAttribute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyAddressAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetVerifiedAccessGroupPolicyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50257,7 +50058,7 @@ func (m *awsEc2query_deserializeOpModifyAddressAttribute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyAddressAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVerifiedAccessGroupPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50290,14 +50091,14 @@ func awsEc2query_deserializeOpErrorModifyAddressAttribute(response *smithyhttp.R } } -type awsEc2query_deserializeOpModifyAvailabilityZoneGroup struct { +type awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration struct { } -func (*awsEc2query_deserializeOpModifyAvailabilityZoneGroup) ID() string { +func (*awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyAvailabilityZoneGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceSampleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50315,9 +50116,9 @@ func (m *awsEc2query_deserializeOpModifyAvailabilityZoneGroup) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyAvailabilityZoneGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVpnConnectionDeviceSampleConfiguration(response, &metadata) } - output := &ModifyAvailabilityZoneGroupOutput{} + output := &GetVpnConnectionDeviceSampleConfigurationOutput{} out.Result = output var buff [1024]byte @@ -50338,7 +50139,7 @@ func (m 
*awsEc2query_deserializeOpModifyAvailabilityZoneGroup) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyAvailabilityZoneGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetVpnConnectionDeviceSampleConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50352,7 +50153,7 @@ func (m *awsEc2query_deserializeOpModifyAvailabilityZoneGroup) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyAvailabilityZoneGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceSampleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50385,14 +50186,14 @@ func awsEc2query_deserializeOpErrorModifyAvailabilityZoneGroup(response *smithyh } } -type awsEc2query_deserializeOpModifyCapacityReservation struct { +type awsEc2query_deserializeOpGetVpnConnectionDeviceTypes struct { } -func (*awsEc2query_deserializeOpModifyCapacityReservation) ID() string { +func (*awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVpnConnectionDeviceTypes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50410,9 +50211,9 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservation) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyCapacityReservation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVpnConnectionDeviceTypes(response, &metadata) } - output := &ModifyCapacityReservationOutput{} + output := &GetVpnConnectionDeviceTypesOutput{} out.Result = output var buff [1024]byte @@ -50433,7 +50234,7 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservation) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyCapacityReservationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetVpnConnectionDeviceTypesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50447,7 +50248,7 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservation) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVpnConnectionDeviceTypes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50480,14 +50281,14 @@ func awsEc2query_deserializeOpErrorModifyCapacityReservation(response *smithyhtt } } -type 
awsEc2query_deserializeOpModifyCapacityReservationFleet struct { +type awsEc2query_deserializeOpGetVpnTunnelReplacementStatus struct { } -func (*awsEc2query_deserializeOpModifyCapacityReservationFleet) ID() string { +func (*awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyCapacityReservationFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpGetVpnTunnelReplacementStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50505,9 +50306,9 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservationFleet) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyCapacityReservationFleet(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorGetVpnTunnelReplacementStatus(response, &metadata) } - output := &ModifyCapacityReservationFleetOutput{} + output := &GetVpnTunnelReplacementStatusOutput{} out.Result = output var buff [1024]byte @@ -50528,7 +50329,7 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservationFleet) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyCapacityReservationFleetOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentGetVpnTunnelReplacementStatusOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50542,7 +50343,7 @@ func (m *awsEc2query_deserializeOpModifyCapacityReservationFleet) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyCapacityReservationFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorGetVpnTunnelReplacementStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50575,14 +50376,14 @@ func awsEc2query_deserializeOpErrorModifyCapacityReservationFleet(response *smit } } -type awsEc2query_deserializeOpModifyClientVpnEndpoint struct { +type awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList struct { } -func (*awsEc2query_deserializeOpModifyClientVpnEndpoint) ID() string { +func (*awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyClientVpnEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportClientVpnClientCertificateRevocationList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50600,9 +50401,9 @@ func (m *awsEc2query_deserializeOpModifyClientVpnEndpoint) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorModifyClientVpnEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportClientVpnClientCertificateRevocationList(response, &metadata) } - output := &ModifyClientVpnEndpointOutput{} + output := &ImportClientVpnClientCertificateRevocationListOutput{} out.Result = output var buff [1024]byte @@ -50623,7 +50424,7 @@ func (m *awsEc2query_deserializeOpModifyClientVpnEndpoint) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyClientVpnEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportClientVpnClientCertificateRevocationListOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50637,7 +50438,7 @@ func (m *awsEc2query_deserializeOpModifyClientVpnEndpoint) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyClientVpnEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportClientVpnClientCertificateRevocationList(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50670,14 +50471,14 @@ func awsEc2query_deserializeOpErrorModifyClientVpnEndpoint(response *smithyhttp. } } -type awsEc2query_deserializeOpModifyDefaultCreditSpecification struct { +type awsEc2query_deserializeOpImportImage struct { } -func (*awsEc2query_deserializeOpModifyDefaultCreditSpecification) ID() string { +func (*awsEc2query_deserializeOpImportImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyDefaultCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50695,9 +50496,9 @@ func (m *awsEc2query_deserializeOpModifyDefaultCreditSpecification) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyDefaultCreditSpecification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportImage(response, &metadata) } - output := &ModifyDefaultCreditSpecificationOutput{} + output := &ImportImageOutput{} out.Result = output var buff [1024]byte @@ -50718,7 +50519,7 @@ func (m *awsEc2query_deserializeOpModifyDefaultCreditSpecification) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyDefaultCreditSpecificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportImageOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50732,7 +50533,7 @@ func (m *awsEc2query_deserializeOpModifyDefaultCreditSpecification) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyDefaultCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportImage(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50765,14 +50566,14 @@ func awsEc2query_deserializeOpErrorModifyDefaultCreditSpecification(response *sm } } -type awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId struct { +type awsEc2query_deserializeOpImportInstance struct { } -func (*awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_deserializeOpImportInstance) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50790,9 +50591,9 @@ func (m *awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyEbsDefaultKmsKeyId(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportInstance(response, &metadata) } - output := &ModifyEbsDefaultKmsKeyIdOutput{} + output := &ImportInstanceOutput{} out.Result = output var buff [1024]byte @@ -50813,7 +50614,7 @@ func (m *awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyEbsDefaultKmsKeyIdOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportInstanceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50827,7 +50628,7 @@ func (m *awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50860,14 +50661,14 @@ func awsEc2query_deserializeOpErrorModifyEbsDefaultKmsKeyId(response *smithyhttp } } -type awsEc2query_deserializeOpModifyFleet struct { +type awsEc2query_deserializeOpImportKeyPair struct { } -func (*awsEc2query_deserializeOpModifyFleet) ID() string { +func (*awsEc2query_deserializeOpImportKeyPair) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50885,9 +50686,9 @@ func (m *awsEc2query_deserializeOpModifyFleet) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, 
metadata, awsEc2query_deserializeOpErrorModifyFleet(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportKeyPair(response, &metadata) } - output := &ModifyFleetOutput{} + output := &ImportKeyPairOutput{} out.Result = output var buff [1024]byte @@ -50908,7 +50709,7 @@ func (m *awsEc2query_deserializeOpModifyFleet) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyFleetOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportKeyPairOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -50922,7 +50723,7 @@ func (m *awsEc2query_deserializeOpModifyFleet) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -50955,14 +50756,14 @@ func awsEc2query_deserializeOpErrorModifyFleet(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpModifyFpgaImageAttribute struct { +type awsEc2query_deserializeOpImportSnapshot struct { } -func (*awsEc2query_deserializeOpModifyFpgaImageAttribute) ID() string { +func (*awsEc2query_deserializeOpImportSnapshot) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -50980,9 +50781,9 @@ func (m *awsEc2query_deserializeOpModifyFpgaImageAttribute) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyFpgaImageAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportSnapshot(response, &metadata) } - output := &ModifyFpgaImageAttributeOutput{} + output := &ImportSnapshotOutput{} out.Result = output var buff [1024]byte @@ -51003,7 +50804,7 @@ func (m *awsEc2query_deserializeOpModifyFpgaImageAttribute) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyFpgaImageAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportSnapshotOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51017,7 +50818,7 @@ func (m *awsEc2query_deserializeOpModifyFpgaImageAttribute) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ 
-51050,14 +50851,14 @@ func awsEc2query_deserializeOpErrorModifyFpgaImageAttribute(response *smithyhttp } } -type awsEc2query_deserializeOpModifyHosts struct { +type awsEc2query_deserializeOpImportVolume struct { } -func (*awsEc2query_deserializeOpModifyHosts) ID() string { +func (*awsEc2query_deserializeOpImportVolume) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpImportVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51075,9 +50876,9 @@ func (m *awsEc2query_deserializeOpModifyHosts) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyHosts(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorImportVolume(response, &metadata) } - output := &ModifyHostsOutput{} + output := &ImportVolumeOutput{} out.Result = output var buff [1024]byte @@ -51098,7 +50899,7 @@ func (m *awsEc2query_deserializeOpModifyHosts) HandleDeserialize(ctx context.Con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyHostsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentImportVolumeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51112,7 +50913,7 @@ func (m *awsEc2query_deserializeOpModifyHosts) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorImportVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51145,14 +50946,14 @@ func awsEc2query_deserializeOpErrorModifyHosts(response *smithyhttp.Response, me } } -type awsEc2query_deserializeOpModifyIdentityIdFormat struct { +type awsEc2query_deserializeOpListImagesInRecycleBin struct { } -func (*awsEc2query_deserializeOpModifyIdentityIdFormat) ID() string { +func (*awsEc2query_deserializeOpListImagesInRecycleBin) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIdentityIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpListImagesInRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51170,165 +50971,44 @@ func (m *awsEc2query_deserializeOpModifyIdentityIdFormat) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIdentityIdFormat(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorListImagesInRecycleBin(response, &metadata) } - output := &ModifyIdentityIdFormatOutput{} + output := 
&ListImagesInRecycleBinOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorModifyIdentityIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil } -} - -type awsEc2query_deserializeOpModifyIdFormat struct { -} - -func (*awsEc2query_deserializeOpModifyIdFormat) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpModifyIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIdFormat(response, &metadata) - } - output := &ModifyIdFormatOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorModifyIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = 
awsEc2query_deserializeOpDocumentListImagesInRecycleBinOutput(&output, decoder) if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } - return genericError - - } -} - -type awsEc2query_deserializeOpModifyImageAttribute struct { -} - -func (*awsEc2query_deserializeOpModifyImageAttribute) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpModifyImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { return out, metadata, err } - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyImageAttribute(response, &metadata) - } - output := &ModifyImageAttributeOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorListImagesInRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51361,14 +51041,14 @@ func awsEc2query_deserializeOpErrorModifyImageAttribute(response *smithyhttp.Res } } -type awsEc2query_deserializeOpModifyInstanceAttribute struct { +type awsEc2query_deserializeOpListSnapshotsInRecycleBin struct { } -func (*awsEc2query_deserializeOpModifyInstanceAttribute) ID() string { +func (*awsEc2query_deserializeOpListSnapshotsInRecycleBin) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpListSnapshotsInRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51386,21 +51066,44 @@ func (m *awsEc2query_deserializeOpModifyInstanceAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorListSnapshotsInRecycleBin(response, &metadata) } - output := &ModifyInstanceAttributeOutput{} + output := &ListSnapshotsInRecycleBinOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentListSnapshotsInRecycleBinOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorListSnapshotsInRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51433,14 +51136,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceAttribute(response *smithyhttp. 
} } -type awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes struct { +type awsEc2query_deserializeOpLockSnapshot struct { } -func (*awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) ID() string { +func (*awsEc2query_deserializeOpLockSnapshot) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpLockSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51458,9 +51161,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceCapacityReservationAttributes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorLockSnapshot(response, &metadata) } - output := &ModifyInstanceCapacityReservationAttributesOutput{} + output := &LockSnapshotOutput{} out.Result = output var buff [1024]byte @@ -51481,7 +51184,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceCapacityReservationAttributesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentLockSnapshotOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51495,7 +51198,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) H return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceCapacityReservationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorLockSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51528,14 +51231,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceCapacityReservationAttributes(r } } -type awsEc2query_deserializeOpModifyInstanceCpuOptions struct { +type awsEc2query_deserializeOpModifyAddressAttribute struct { } -func (*awsEc2query_deserializeOpModifyInstanceCpuOptions) ID() string { +func (*awsEc2query_deserializeOpModifyAddressAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceCpuOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyAddressAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51553,9 +51256,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceCpuOptions) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceCpuOptions(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorModifyAddressAttribute(response, &metadata) } - output := &ModifyInstanceCpuOptionsOutput{} + output := &ModifyAddressAttributeOutput{} out.Result = output var buff [1024]byte @@ -51576,7 +51279,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCpuOptions) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceCpuOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyAddressAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51590,7 +51293,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCpuOptions) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceCpuOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyAddressAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51623,14 +51326,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceCpuOptions(response *smithyhttp } } -type awsEc2query_deserializeOpModifyInstanceCreditSpecification struct { +type awsEc2query_deserializeOpModifyAvailabilityZoneGroup struct { } -func (*awsEc2query_deserializeOpModifyInstanceCreditSpecification) ID() string { +func (*awsEc2query_deserializeOpModifyAvailabilityZoneGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyAvailabilityZoneGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51648,9 +51351,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceCreditSpecification) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceCreditSpecification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyAvailabilityZoneGroup(response, &metadata) } - output := &ModifyInstanceCreditSpecificationOutput{} + output := &ModifyAvailabilityZoneGroupOutput{} out.Result = output var buff [1024]byte @@ -51671,7 +51374,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCreditSpecification) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceCreditSpecificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyAvailabilityZoneGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51685,7 +51388,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceCreditSpecification) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyAvailabilityZoneGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51718,14 +51421,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceCreditSpecification(response *s } } -type awsEc2query_deserializeOpModifyInstanceEventStartTime struct { +type awsEc2query_deserializeOpModifyCapacityReservation struct { } -func (*awsEc2query_deserializeOpModifyInstanceEventStartTime) ID() string { +func (*awsEc2query_deserializeOpModifyCapacityReservation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceEventStartTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51743,9 +51446,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceEventStartTime) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceEventStartTime(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyCapacityReservation(response, &metadata) } - output := &ModifyInstanceEventStartTimeOutput{} + output := &ModifyCapacityReservationOutput{} out.Result = output var buff [1024]byte @@ -51766,7 +51469,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceEventStartTime) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceEventStartTimeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyCapacityReservationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51780,7 +51483,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceEventStartTime) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceEventStartTime(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51813,14 +51516,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceEventStartTime(response *smithy } } -type awsEc2query_deserializeOpModifyInstanceEventWindow struct { +type awsEc2query_deserializeOpModifyCapacityReservationFleet struct { } -func (*awsEc2query_deserializeOpModifyInstanceEventWindow) ID() string { +func (*awsEc2query_deserializeOpModifyCapacityReservationFleet) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyCapacityReservationFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51838,9 +51541,9 @@ func (m 
*awsEc2query_deserializeOpModifyInstanceEventWindow) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceEventWindow(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyCapacityReservationFleet(response, &metadata) } - output := &ModifyInstanceEventWindowOutput{} + output := &ModifyCapacityReservationFleetOutput{} out.Result = output var buff [1024]byte @@ -51861,7 +51564,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceEventWindow) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceEventWindowOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyCapacityReservationFleetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51875,7 +51578,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceEventWindow) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyCapacityReservationFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -51908,14 +51611,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceEventWindow(response *smithyhtt } } -type awsEc2query_deserializeOpModifyInstanceMaintenanceOptions struct { +type awsEc2query_deserializeOpModifyClientVpnEndpoint struct { } -func (*awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) ID() string { +func (*awsEc2query_deserializeOpModifyClientVpnEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyClientVpnEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -51933,9 +51636,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMaintenanceOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyClientVpnEndpoint(response, &metadata) } - output := &ModifyInstanceMaintenanceOptionsOutput{} + output := &ModifyClientVpnEndpointOutput{} out.Result = output var buff [1024]byte @@ -51956,7 +51659,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceMaintenanceOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyClientVpnEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -51970,7 +51673,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) HandleDeseri return out, metadata, err } -func 
awsEc2query_deserializeOpErrorModifyInstanceMaintenanceOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyClientVpnEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52003,14 +51706,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceMaintenanceOptions(response *sm } } -type awsEc2query_deserializeOpModifyInstanceMetadataDefaults struct { +type awsEc2query_deserializeOpModifyDefaultCreditSpecification struct { } -func (*awsEc2query_deserializeOpModifyInstanceMetadataDefaults) ID() string { +func (*awsEc2query_deserializeOpModifyDefaultCreditSpecification) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceMetadataDefaults) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyDefaultCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52028,9 +51731,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataDefaults) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMetadataDefaults(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyDefaultCreditSpecification(response, &metadata) } - output := &ModifyInstanceMetadataDefaultsOutput{} + output := &ModifyDefaultCreditSpecificationOutput{} out.Result = output var buff [1024]byte @@ -52051,7 +51754,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataDefaults) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceMetadataDefaultsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyDefaultCreditSpecificationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52065,7 +51768,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataDefaults) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceMetadataDefaults(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyDefaultCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52098,14 +51801,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceMetadataDefaults(response *smit } } -type awsEc2query_deserializeOpModifyInstanceMetadataOptions struct { +type awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId struct { } -func (*awsEc2query_deserializeOpModifyInstanceMetadataOptions) ID() string { +func (*awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceMetadataOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52123,9 +51826,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataOptions) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMetadataOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyEbsDefaultKmsKeyId(response, &metadata) } - output := &ModifyInstanceMetadataOptionsOutput{} + output := &ModifyEbsDefaultKmsKeyIdOutput{} out.Result = output var buff [1024]byte @@ -52146,7 +51849,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataOptions) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceMetadataOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyEbsDefaultKmsKeyIdOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52160,7 +51863,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceMetadataOptions) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceMetadataOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52193,14 +51896,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceMetadataOptions(response *smith } } -type awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions struct { +type awsEc2query_deserializeOpModifyFleet struct { } -func (*awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) ID() string { +func (*awsEc2query_deserializeOpModifyFleet) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52218,9 +51921,9 @@ func (m *awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceNetworkPerformanceOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyFleet(response, &metadata) } - output := &ModifyInstanceNetworkPerformanceOptionsOutput{} + output := &ModifyFleetOutput{} out.Result = output var buff [1024]byte @@ -52241,7 +51944,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstanceNetworkPerformanceOptionsOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentModifyFleetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52255,7 +51958,7 @@ func (m *awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstanceNetworkPerformanceOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52288,14 +51991,14 @@ func awsEc2query_deserializeOpErrorModifyInstanceNetworkPerformanceOptions(respo } } -type awsEc2query_deserializeOpModifyInstancePlacement struct { +type awsEc2query_deserializeOpModifyFpgaImageAttribute struct { } -func (*awsEc2query_deserializeOpModifyInstancePlacement) ID() string { +func (*awsEc2query_deserializeOpModifyFpgaImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyInstancePlacement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52313,9 +52016,9 @@ func (m *awsEc2query_deserializeOpModifyInstancePlacement) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyInstancePlacement(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyFpgaImageAttribute(response, &metadata) } - output := &ModifyInstancePlacementOutput{} + output := &ModifyFpgaImageAttributeOutput{} out.Result = output var buff [1024]byte @@ -52336,7 +52039,7 @@ func (m *awsEc2query_deserializeOpModifyInstancePlacement) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyInstancePlacementOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyFpgaImageAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52350,7 +52053,7 @@ func (m *awsEc2query_deserializeOpModifyInstancePlacement) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyInstancePlacement(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52383,14 +52086,14 @@ func awsEc2query_deserializeOpErrorModifyInstancePlacement(response *smithyhttp. 
} } -type awsEc2query_deserializeOpModifyIpam struct { +type awsEc2query_deserializeOpModifyHosts struct { } -func (*awsEc2query_deserializeOpModifyIpam) ID() string { +func (*awsEc2query_deserializeOpModifyHosts) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52408,9 +52111,9 @@ func (m *awsEc2query_deserializeOpModifyIpam) HandleDeserialize(ctx context.Cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIpam(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyHosts(response, &metadata) } - output := &ModifyIpamOutput{} + output := &ModifyHostsOutput{} out.Result = output var buff [1024]byte @@ -52431,7 +52134,7 @@ func (m *awsEc2query_deserializeOpModifyIpam) HandleDeserialize(ctx context.Cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyIpamOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyHostsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52445,7 +52148,7 @@ func (m *awsEc2query_deserializeOpModifyIpam) HandleDeserialize(ctx context.Cont return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52478,14 +52181,14 @@ func awsEc2query_deserializeOpErrorModifyIpam(response *smithyhttp.Response, met } } -type awsEc2query_deserializeOpModifyIpamPool struct { +type awsEc2query_deserializeOpModifyIdentityIdFormat struct { } -func (*awsEc2query_deserializeOpModifyIpamPool) ID() string { +func (*awsEc2query_deserializeOpModifyIdentityIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIdentityIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52503,44 +52206,21 @@ func (m *awsEc2query_deserializeOpModifyIpamPool) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIpamPool(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIdentityIdFormat(response, &metadata) } - output := &ModifyIpamPoolOutput{} + output := &ModifyIdentityIdFormatOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyIpamPoolOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyIpamPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIdentityIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52573,14 +52253,14 @@ func awsEc2query_deserializeOpErrorModifyIpamPool(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpModifyIpamResourceCidr struct { +type awsEc2query_deserializeOpModifyIdFormat struct { } -func (*awsEc2query_deserializeOpModifyIpamResourceCidr) ID() string { +func (*awsEc2query_deserializeOpModifyIdFormat) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIpamResourceCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIdFormat) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52598,44 +52278,21 @@ func (m *awsEc2query_deserializeOpModifyIpamResourceCidr) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIpamResourceCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIdFormat(response, &metadata) } - output := &ModifyIpamResourceCidrOutput{} + output := &ModifyIdFormatOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { 
return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyIpamResourceCidrOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyIpamResourceCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIdFormat(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52668,14 +52325,14 @@ func awsEc2query_deserializeOpErrorModifyIpamResourceCidr(response *smithyhttp.R } } -type awsEc2query_deserializeOpModifyIpamResourceDiscovery struct { +type awsEc2query_deserializeOpModifyImageAttribute struct { } -func (*awsEc2query_deserializeOpModifyIpamResourceDiscovery) ID() string { +func (*awsEc2query_deserializeOpModifyImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52693,9 +52350,153 @@ func (m *awsEc2query_deserializeOpModifyIpamResourceDiscovery) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIpamResourceDiscovery(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyImageAttribute(response, &metadata) } - output := &ModifyIpamResourceDiscoveryOutput{} + output := &ModifyImageAttributeOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorModifyImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := 
&smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpModifyInstanceAttribute struct { +} + +func (*awsEc2query_deserializeOpModifyInstanceAttribute) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpModifyInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceAttribute(response, &metadata) + } + output := &ModifyInstanceAttributeOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorModifyInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes struct { +} + +func (*awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpModifyInstanceCapacityReservationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsEc2query_deserializeOpErrorModifyInstanceCapacityReservationAttributes(response, &metadata) + } + output := &ModifyInstanceCapacityReservationAttributesOutput{} out.Result = output var buff [1024]byte @@ -52716,7 +52517,7 @@ func (m *awsEc2query_deserializeOpModifyIpamResourceDiscovery) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyIpamResourceDiscoveryOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceCapacityReservationAttributesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52730,7 +52531,7 @@ func (m *awsEc2query_deserializeOpModifyIpamResourceDiscovery) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceCapacityReservationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52763,14 +52564,14 @@ func awsEc2query_deserializeOpErrorModifyIpamResourceDiscovery(response *smithyh } } -type awsEc2query_deserializeOpModifyIpamScope struct { +type awsEc2query_deserializeOpModifyInstanceConnectEndpoint struct { } -func (*awsEc2query_deserializeOpModifyIpamScope) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceConnectEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceConnectEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52788,9 +52589,9 @@ func (m *awsEc2query_deserializeOpModifyIpamScope) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyIpamScope(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceConnectEndpoint(response, &metadata) } - output := &ModifyIpamScopeOutput{} + output := &ModifyInstanceConnectEndpointOutput{} out.Result = output var buff [1024]byte @@ -52811,7 +52612,7 @@ func (m *awsEc2query_deserializeOpModifyIpamScope) HandleDeserialize(ctx context } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyIpamScopeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceConnectEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52825,7 +52626,7 @@ func (m *awsEc2query_deserializeOpModifyIpamScope) HandleDeserialize(ctx context return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceConnectEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52858,14 +52659,14 @@ func awsEc2query_deserializeOpErrorModifyIpamScope(response *smithyhttp.Response } } -type awsEc2query_deserializeOpModifyLaunchTemplate struct { +type awsEc2query_deserializeOpModifyInstanceCpuOptions struct { } -func (*awsEc2query_deserializeOpModifyLaunchTemplate) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceCpuOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceCpuOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52883,9 +52684,9 @@ func (m *awsEc2query_deserializeOpModifyLaunchTemplate) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyLaunchTemplate(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceCpuOptions(response, &metadata) } - output := &ModifyLaunchTemplateOutput{} + output := &ModifyInstanceCpuOptionsOutput{} out.Result = output var buff [1024]byte @@ -52906,7 +52707,7 @@ func (m *awsEc2query_deserializeOpModifyLaunchTemplate) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyLaunchTemplateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceCpuOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -52920,7 +52721,7 @@ func (m *awsEc2query_deserializeOpModifyLaunchTemplate) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceCpuOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -52953,14 +52754,14 @@ func awsEc2query_deserializeOpErrorModifyLaunchTemplate(response *smithyhttp.Res } } -type awsEc2query_deserializeOpModifyLocalGatewayRoute struct { +type awsEc2query_deserializeOpModifyInstanceCreditSpecification struct { } -func (*awsEc2query_deserializeOpModifyLocalGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceCreditSpecification) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceCreditSpecification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -52978,9 +52779,9 @@ func (m *awsEc2query_deserializeOpModifyLocalGatewayRoute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - 
return out, metadata, awsEc2query_deserializeOpErrorModifyLocalGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceCreditSpecification(response, &metadata) } - output := &ModifyLocalGatewayRouteOutput{} + output := &ModifyInstanceCreditSpecificationOutput{} out.Result = output var buff [1024]byte @@ -53001,7 +52802,7 @@ func (m *awsEc2query_deserializeOpModifyLocalGatewayRoute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyLocalGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceCreditSpecificationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53015,7 +52816,7 @@ func (m *awsEc2query_deserializeOpModifyLocalGatewayRoute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceCreditSpecification(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53048,14 +52849,14 @@ func awsEc2query_deserializeOpErrorModifyLocalGatewayRoute(response *smithyhttp. } } -type awsEc2query_deserializeOpModifyManagedPrefixList struct { +type awsEc2query_deserializeOpModifyInstanceEventStartTime struct { } -func (*awsEc2query_deserializeOpModifyManagedPrefixList) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceEventStartTime) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyManagedPrefixList) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceEventStartTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53073,9 +52874,9 @@ func (m *awsEc2query_deserializeOpModifyManagedPrefixList) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyManagedPrefixList(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceEventStartTime(response, &metadata) } - output := &ModifyManagedPrefixListOutput{} + output := &ModifyInstanceEventStartTimeOutput{} out.Result = output var buff [1024]byte @@ -53096,7 +52897,7 @@ func (m *awsEc2query_deserializeOpModifyManagedPrefixList) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyManagedPrefixListOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceEventStartTimeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53110,7 +52911,7 @@ func (m *awsEc2query_deserializeOpModifyManagedPrefixList) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceEventStartTime(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53143,14 +52944,14 @@ func awsEc2query_deserializeOpErrorModifyManagedPrefixList(response *smithyhttp. } } -type awsEc2query_deserializeOpModifyNetworkInterfaceAttribute struct { +type awsEc2query_deserializeOpModifyInstanceEventWindow struct { } -func (*awsEc2query_deserializeOpModifyNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceEventWindow) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceEventWindow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53168,21 +52969,44 @@ func (m *awsEc2query_deserializeOpModifyNetworkInterfaceAttribute) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyNetworkInterfaceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceEventWindow(response, &metadata) } - output := &ModifyNetworkInterfaceAttributeOutput{} + output := &ModifyInstanceEventWindowOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentModifyInstanceEventWindowOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceEventWindow(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53215,14 +53039,14 @@ func awsEc2query_deserializeOpErrorModifyNetworkInterfaceAttribute(response *smi } } -type awsEc2query_deserializeOpModifyPrivateDnsNameOptions struct { +type awsEc2query_deserializeOpModifyInstanceMaintenanceOptions struct { } -func (*awsEc2query_deserializeOpModifyPrivateDnsNameOptions) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) 
ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyPrivateDnsNameOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceMaintenanceOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53240,9 +53064,9 @@ func (m *awsEc2query_deserializeOpModifyPrivateDnsNameOptions) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyPrivateDnsNameOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMaintenanceOptions(response, &metadata) } - output := &ModifyPrivateDnsNameOptionsOutput{} + output := &ModifyInstanceMaintenanceOptionsOutput{} out.Result = output var buff [1024]byte @@ -53263,7 +53087,7 @@ func (m *awsEc2query_deserializeOpModifyPrivateDnsNameOptions) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyPrivateDnsNameOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceMaintenanceOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53277,7 +53101,7 @@ func (m *awsEc2query_deserializeOpModifyPrivateDnsNameOptions) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyPrivateDnsNameOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceMaintenanceOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53310,14 +53134,14 @@ func awsEc2query_deserializeOpErrorModifyPrivateDnsNameOptions(response *smithyh } } -type awsEc2query_deserializeOpModifyReservedInstances struct { +type awsEc2query_deserializeOpModifyInstanceMetadataDefaults struct { } -func (*awsEc2query_deserializeOpModifyReservedInstances) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceMetadataDefaults) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceMetadataDefaults) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53335,9 +53159,9 @@ func (m *awsEc2query_deserializeOpModifyReservedInstances) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyReservedInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMetadataDefaults(response, &metadata) } - output := &ModifyReservedInstancesOutput{} + output := &ModifyInstanceMetadataDefaultsOutput{} out.Result = output var buff [1024]byte @@ -53358,7 +53182,7 @@ func (m 
*awsEc2query_deserializeOpModifyReservedInstances) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyReservedInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceMetadataDefaultsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53372,7 +53196,7 @@ func (m *awsEc2query_deserializeOpModifyReservedInstances) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceMetadataDefaults(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53405,14 +53229,14 @@ func awsEc2query_deserializeOpErrorModifyReservedInstances(response *smithyhttp. } } -type awsEc2query_deserializeOpModifyRouteServer struct { +type awsEc2query_deserializeOpModifyInstanceMetadataOptions struct { } -func (*awsEc2query_deserializeOpModifyRouteServer) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceMetadataOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceMetadataOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53430,9 +53254,9 @@ func (m *awsEc2query_deserializeOpModifyRouteServer) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyRouteServer(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceMetadataOptions(response, &metadata) } - output := &ModifyRouteServerOutput{} + output := &ModifyInstanceMetadataOptionsOutput{} out.Result = output var buff [1024]byte @@ -53453,7 +53277,7 @@ func (m *awsEc2query_deserializeOpModifyRouteServer) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyRouteServerOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceMetadataOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53467,7 +53291,7 @@ func (m *awsEc2query_deserializeOpModifyRouteServer) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceMetadataOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53500,14 +53324,14 @@ func awsEc2query_deserializeOpErrorModifyRouteServer(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpModifySecurityGroupRules struct { +type 
awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions struct { } -func (*awsEc2query_deserializeOpModifySecurityGroupRules) ID() string { +func (*awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifySecurityGroupRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstanceNetworkPerformanceOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53525,9 +53349,9 @@ func (m *awsEc2query_deserializeOpModifySecurityGroupRules) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifySecurityGroupRules(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyInstanceNetworkPerformanceOptions(response, &metadata) } - output := &ModifySecurityGroupRulesOutput{} + output := &ModifyInstanceNetworkPerformanceOptionsOutput{} out.Result = output var buff [1024]byte @@ -53548,7 +53372,7 @@ func (m *awsEc2query_deserializeOpModifySecurityGroupRules) HandleDeserialize(ct } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifySecurityGroupRulesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyInstanceNetworkPerformanceOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53562,7 +53386,7 @@ func (m *awsEc2query_deserializeOpModifySecurityGroupRules) HandleDeserialize(ct return out, metadata, err } -func awsEc2query_deserializeOpErrorModifySecurityGroupRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstanceNetworkPerformanceOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53595,14 +53419,14 @@ func awsEc2query_deserializeOpErrorModifySecurityGroupRules(response *smithyhttp } } -type awsEc2query_deserializeOpModifySnapshotAttribute struct { +type awsEc2query_deserializeOpModifyInstancePlacement struct { } -func (*awsEc2query_deserializeOpModifySnapshotAttribute) ID() string { +func (*awsEc2query_deserializeOpModifyInstancePlacement) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifySnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyInstancePlacement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53620,21 +53444,44 @@ func (m *awsEc2query_deserializeOpModifySnapshotAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifySnapshotAttribute(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorModifyInstancePlacement(response, &metadata) } - output := &ModifySnapshotAttributeOutput{} + output := &ModifyInstancePlacementOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentModifyInstancePlacementOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifySnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyInstancePlacement(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53667,14 +53514,14 @@ func awsEc2query_deserializeOpErrorModifySnapshotAttribute(response *smithyhttp. 
} } -type awsEc2query_deserializeOpModifySnapshotTier struct { +type awsEc2query_deserializeOpModifyIpam struct { } -func (*awsEc2query_deserializeOpModifySnapshotTier) ID() string { +func (*awsEc2query_deserializeOpModifyIpam) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifySnapshotTier) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53692,9 +53539,9 @@ func (m *awsEc2query_deserializeOpModifySnapshotTier) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifySnapshotTier(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIpam(response, &metadata) } - output := &ModifySnapshotTierOutput{} + output := &ModifyIpamOutput{} out.Result = output var buff [1024]byte @@ -53715,7 +53562,7 @@ func (m *awsEc2query_deserializeOpModifySnapshotTier) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifySnapshotTierOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyIpamOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53729,7 +53576,7 @@ func (m *awsEc2query_deserializeOpModifySnapshotTier) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorModifySnapshotTier(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53762,14 +53609,14 @@ func awsEc2query_deserializeOpErrorModifySnapshotTier(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpModifySpotFleetRequest struct { +type awsEc2query_deserializeOpModifyIpamPool struct { } -func (*awsEc2query_deserializeOpModifySpotFleetRequest) ID() string { +func (*awsEc2query_deserializeOpModifyIpamPool) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifySpotFleetRequest) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIpamPool) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53787,9 +53634,9 @@ func (m *awsEc2query_deserializeOpModifySpotFleetRequest) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifySpotFleetRequest(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIpamPool(response, &metadata) } - output := &ModifySpotFleetRequestOutput{} + output := &ModifyIpamPoolOutput{} out.Result = output var buff [1024]byte @@ -53810,7 +53657,7 @@ func (m 
*awsEc2query_deserializeOpModifySpotFleetRequest) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifySpotFleetRequestOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyIpamPoolOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53824,7 +53671,7 @@ func (m *awsEc2query_deserializeOpModifySpotFleetRequest) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifySpotFleetRequest(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIpamPool(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53857,14 +53704,14 @@ func awsEc2query_deserializeOpErrorModifySpotFleetRequest(response *smithyhttp.R } } -type awsEc2query_deserializeOpModifySubnetAttribute struct { +type awsEc2query_deserializeOpModifyIpamResourceCidr struct { } -func (*awsEc2query_deserializeOpModifySubnetAttribute) ID() string { +func (*awsEc2query_deserializeOpModifyIpamResourceCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifySubnetAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIpamResourceCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53882,21 +53729,44 @@ func (m *awsEc2query_deserializeOpModifySubnetAttribute) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifySubnetAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIpamResourceCidr(response, &metadata) } - output := &ModifySubnetAttributeOutput{} + output := &ModifyIpamResourceCidrOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentModifyIpamResourceCidrOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifySubnetAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIpamResourceCidr(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -53929,14 +53799,14 @@ func awsEc2query_deserializeOpErrorModifySubnetAttribute(response *smithyhttp.Re } } -type awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices struct { +type awsEc2query_deserializeOpModifyIpamResourceDiscovery struct { } -func (*awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) ID() string { +func (*awsEc2query_deserializeOpModifyIpamResourceDiscovery) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIpamResourceDiscovery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -53954,9 +53824,9 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) Hand } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterNetworkServices(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIpamResourceDiscovery(response, &metadata) } - output := &ModifyTrafficMirrorFilterNetworkServicesOutput{} + output := &ModifyIpamResourceDiscoveryOutput{} out.Result = output var buff [1024]byte @@ -53977,7 +53847,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) Hand } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorFilterNetworkServicesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyIpamResourceDiscoveryOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -53991,7 +53861,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) Hand return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterNetworkServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIpamResourceDiscovery(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54024,14 +53894,14 @@ func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterNetworkServices(resp } } -type awsEc2query_deserializeOpModifyTrafficMirrorFilterRule struct { +type awsEc2query_deserializeOpModifyIpamScope struct { } -func (*awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) ID() string { +func (*awsEc2query_deserializeOpModifyIpamScope) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyIpamScope) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54049,9 +53919,9 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterRule(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyIpamScope(response, &metadata) } - output := &ModifyTrafficMirrorFilterRuleOutput{} + output := &ModifyIpamScopeOutput{} out.Result = output var buff [1024]byte @@ -54072,7 +53942,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorFilterRuleOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyIpamScopeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54086,7 +53956,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterRule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyIpamScope(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54119,14 +53989,14 @@ func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterRule(response *smith } } -type awsEc2query_deserializeOpModifyTrafficMirrorSession struct { +type awsEc2query_deserializeOpModifyLaunchTemplate struct { } -func (*awsEc2query_deserializeOpModifyTrafficMirrorSession) ID() string { +func (*awsEc2query_deserializeOpModifyLaunchTemplate) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyLaunchTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54144,9 +54014,9 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorSession) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorSession(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyLaunchTemplate(response, &metadata) } - output := &ModifyTrafficMirrorSessionOutput{} + output := &ModifyLaunchTemplateOutput{} out.Result = output var buff [1024]byte @@ -54167,7 +54037,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorSession) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorSessionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyLaunchTemplateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54181,7 +54051,7 @@ func (m *awsEc2query_deserializeOpModifyTrafficMirrorSession) HandleDeserialize( return out, metadata, err } -func 
awsEc2query_deserializeOpErrorModifyTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyLaunchTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54214,14 +54084,14 @@ func awsEc2query_deserializeOpErrorModifyTrafficMirrorSession(response *smithyht } } -type awsEc2query_deserializeOpModifyTransitGateway struct { +type awsEc2query_deserializeOpModifyLocalGatewayRoute struct { } -func (*awsEc2query_deserializeOpModifyTransitGateway) ID() string { +func (*awsEc2query_deserializeOpModifyLocalGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTransitGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyLocalGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54239,9 +54109,9 @@ func (m *awsEc2query_deserializeOpModifyTransitGateway) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGateway(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyLocalGatewayRoute(response, &metadata) } - output := &ModifyTransitGatewayOutput{} + output := &ModifyLocalGatewayRouteOutput{} out.Result = output var buff [1024]byte @@ -54262,7 +54132,7 @@ func (m *awsEc2query_deserializeOpModifyTransitGateway) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTransitGatewayOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyLocalGatewayRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54276,7 +54146,7 @@ func (m *awsEc2query_deserializeOpModifyTransitGateway) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyLocalGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54309,14 +54179,14 @@ func awsEc2query_deserializeOpErrorModifyTransitGateway(response *smithyhttp.Res } } -type awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference struct { +type awsEc2query_deserializeOpModifyManagedPrefixList struct { } -func (*awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) ID() string { +func (*awsEc2query_deserializeOpModifyManagedPrefixList) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyManagedPrefixList) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54334,9 +54204,9 @@ func (m *awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGatewayPrefixListReference(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyManagedPrefixList(response, &metadata) } - output := &ModifyTransitGatewayPrefixListReferenceOutput{} + output := &ModifyManagedPrefixListOutput{} out.Result = output var buff [1024]byte @@ -54357,7 +54227,7 @@ func (m *awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTransitGatewayPrefixListReferenceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyManagedPrefixListOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54371,7 +54241,7 @@ func (m *awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyManagedPrefixList(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54404,14 +54274,14 @@ func awsEc2query_deserializeOpErrorModifyTransitGatewayPrefixListReference(respo } } -type awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment struct { +type awsEc2query_deserializeOpModifyNetworkInterfaceAttribute struct { } -func (*awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_deserializeOpModifyNetworkInterfaceAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54429,44 +54299,21 @@ func (m *awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGatewayVpcAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyNetworkInterfaceAttribute(response, &metadata) } - output := &ModifyTransitGatewayVpcAttachmentOutput{} + output := &ModifyNetworkInterfaceAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, 
ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyTransitGatewayVpcAttachmentOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54499,14 +54346,14 @@ func awsEc2query_deserializeOpErrorModifyTransitGatewayVpcAttachment(response *s } } -type awsEc2query_deserializeOpModifyVerifiedAccessEndpoint struct { +type awsEc2query_deserializeOpModifyPrivateDnsNameOptions struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) ID() string { +func (*awsEc2query_deserializeOpModifyPrivateDnsNameOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyPrivateDnsNameOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54524,9 +54371,9 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyPrivateDnsNameOptions(response, &metadata) } - output := &ModifyVerifiedAccessEndpointOutput{} + output := &ModifyPrivateDnsNameOptionsOutput{} out.Result = output var buff [1024]byte @@ -54547,7 +54394,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyPrivateDnsNameOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54561,7 +54408,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyPrivateDnsNameOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54594,14 +54441,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpoint(response *smithy } } -type awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy struct { +type awsEc2query_deserializeOpModifyPublicIpDnsNameOptions struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) ID() string { +func (*awsEc2query_deserializeOpModifyPublicIpDnsNameOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyPublicIpDnsNameOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54619,9 +54466,9 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) HandleDese } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpointPolicy(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyPublicIpDnsNameOptions(response, &metadata) } - output := &ModifyVerifiedAccessEndpointPolicyOutput{} + output := &ModifyPublicIpDnsNameOptionsOutput{} out.Result = output var buff [1024]byte @@ -54642,7 +54489,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) HandleDese } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessEndpointPolicyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyPublicIpDnsNameOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54656,7 +54503,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) HandleDese return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyPublicIpDnsNameOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54689,14 +54536,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpointPolicy(response * } } -type awsEc2query_deserializeOpModifyVerifiedAccessGroup struct { +type awsEc2query_deserializeOpModifyReservedInstances struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessGroup) ID() string { +func (*awsEc2query_deserializeOpModifyReservedInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyReservedInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54714,9 +54561,9 @@ func (m 
*awsEc2query_deserializeOpModifyVerifiedAccessGroup) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessGroup(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyReservedInstances(response, &metadata) } - output := &ModifyVerifiedAccessGroupOutput{} + output := &ModifyReservedInstancesOutput{} out.Result = output var buff [1024]byte @@ -54737,7 +54584,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroup) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessGroupOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyReservedInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54751,7 +54598,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroup) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyReservedInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54784,14 +54631,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroup(response *smithyhtt } } -type awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy struct { +type awsEc2query_deserializeOpModifyRouteServer struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) ID() string { +func (*awsEc2query_deserializeOpModifyRouteServer) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyRouteServer) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54809,9 +54656,9 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessGroupPolicy(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyRouteServer(response, &metadata) } - output := &ModifyVerifiedAccessGroupPolicyOutput{} + output := &ModifyRouteServerOutput{} out.Result = output var buff [1024]byte @@ -54832,7 +54679,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessGroupPolicyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyRouteServerOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54846,7 +54693,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroupPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { 
+func awsEc2query_deserializeOpErrorModifyRouteServer(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54879,14 +54726,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroupPolicy(response *smi } } -type awsEc2query_deserializeOpModifyVerifiedAccessInstance struct { +type awsEc2query_deserializeOpModifySecurityGroupRules struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessInstance) ID() string { +func (*awsEc2query_deserializeOpModifySecurityGroupRules) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifySecurityGroupRules) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54904,9 +54751,9 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstance) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessInstance(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifySecurityGroupRules(response, &metadata) } - output := &ModifyVerifiedAccessInstanceOutput{} + output := &ModifySecurityGroupRulesOutput{} out.Result = output var buff [1024]byte @@ -54927,7 +54774,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstance) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessInstanceOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifySecurityGroupRulesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -54941,7 +54788,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstance) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifySecurityGroupRules(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -54974,14 +54821,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstance(response *smithy } } -type awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration struct { +type awsEc2query_deserializeOpModifySnapshotAttribute struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration) ID() string { +func (*awsEc2query_deserializeOpModifySnapshotAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifySnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -54999,44 +54846,21 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfigurati } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessInstanceLoggingConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifySnapshotAttribute(response, &metadata) } - output := &ModifyVerifiedAccessInstanceLoggingConfigurationOutput{} + output := &ModifySnapshotAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessInstanceLoggingConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstanceLoggingConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifySnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55069,14 +54893,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstanceLoggingConfigurat } } -type awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider struct { +type awsEc2query_deserializeOpModifySnapshotTier struct { } -func (*awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_deserializeOpModifySnapshotTier) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifySnapshotTier) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55094,9 +54918,9 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessTrustProvider(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifySnapshotTier(response, &metadata) } - output := 
&ModifyVerifiedAccessTrustProviderOutput{} + output := &ModifySnapshotTierOutput{} out.Result = output var buff [1024]byte @@ -55117,7 +54941,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessTrustProviderOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifySnapshotTierOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55131,7 +54955,7 @@ func (m *awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifySnapshotTier(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55164,14 +54988,14 @@ func awsEc2query_deserializeOpErrorModifyVerifiedAccessTrustProvider(response *s } } -type awsEc2query_deserializeOpModifyVolume struct { +type awsEc2query_deserializeOpModifySpotFleetRequest struct { } -func (*awsEc2query_deserializeOpModifyVolume) ID() string { +func (*awsEc2query_deserializeOpModifySpotFleetRequest) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifySpotFleetRequest) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55189,9 +55013,9 @@ func (m *awsEc2query_deserializeOpModifyVolume) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVolume(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifySpotFleetRequest(response, &metadata) } - output := &ModifyVolumeOutput{} + output := &ModifySpotFleetRequestOutput{} out.Result = output var buff [1024]byte @@ -55212,7 +55036,7 @@ func (m *awsEc2query_deserializeOpModifyVolume) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVolumeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifySpotFleetRequestOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55226,7 +55050,7 @@ func (m *awsEc2query_deserializeOpModifyVolume) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifySpotFleetRequest(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55259,14 +55083,14 @@ func awsEc2query_deserializeOpErrorModifyVolume(response *smithyhttp.Response, m } } -type 
awsEc2query_deserializeOpModifyVolumeAttribute struct { +type awsEc2query_deserializeOpModifySubnetAttribute struct { } -func (*awsEc2query_deserializeOpModifyVolumeAttribute) ID() string { +func (*awsEc2query_deserializeOpModifySubnetAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVolumeAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifySubnetAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55284,9 +55108,9 @@ func (m *awsEc2query_deserializeOpModifyVolumeAttribute) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVolumeAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifySubnetAttribute(response, &metadata) } - output := &ModifyVolumeAttributeOutput{} + output := &ModifySubnetAttributeOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -55298,7 +55122,7 @@ func (m *awsEc2query_deserializeOpModifyVolumeAttribute) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVolumeAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifySubnetAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55331,14 +55155,14 @@ func awsEc2query_deserializeOpErrorModifyVolumeAttribute(response *smithyhttp.Re } } -type awsEc2query_deserializeOpModifyVpcAttribute struct { +type awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices struct { } -func (*awsEc2query_deserializeOpModifyVpcAttribute) ID() string { +func (*awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterNetworkServices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55356,21 +55180,44 @@ func (m *awsEc2query_deserializeOpModifyVpcAttribute) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterNetworkServices(response, &metadata) } - output := &ModifyVpcAttributeOutput{} + output := &ModifyTrafficMirrorFilterNetworkServicesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := 
smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorFilterNetworkServicesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterNetworkServices(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55403,14 +55250,14 @@ func awsEc2query_deserializeOpErrorModifyVpcAttribute(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion struct { +type awsEc2query_deserializeOpModifyTrafficMirrorFilterRule struct { } -func (*awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) ID() string { +func (*awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTrafficMirrorFilterRule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55428,9 +55275,9 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessExclusion(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterRule(response, &metadata) } - output := &ModifyVpcBlockPublicAccessExclusionOutput{} + output := &ModifyTrafficMirrorFilterRuleOutput{} out.Result = output var buff [1024]byte @@ -55451,7 +55298,7 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcBlockPublicAccessExclusionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorFilterRuleOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55465,7 +55312,7 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTrafficMirrorFilterRule(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55498,14 +55345,14 @@ func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessExclusion(response } } -type awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions struct { +type awsEc2query_deserializeOpModifyTrafficMirrorSession struct { } -func (*awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) ID() string { +func (*awsEc2query_deserializeOpModifyTrafficMirrorSession) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTrafficMirrorSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55523,9 +55370,9 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTrafficMirrorSession(response, &metadata) } - output := &ModifyVpcBlockPublicAccessOptionsOutput{} + output := &ModifyTrafficMirrorSessionOutput{} out.Result = output var buff [1024]byte @@ -55546,7 +55393,7 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcBlockPublicAccessOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyTrafficMirrorSessionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55560,7 +55407,7 @@ func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTrafficMirrorSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55593,14 +55440,14 @@ func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessOptions(response *s } } -type awsEc2query_deserializeOpModifyVpcEndpoint struct { +type awsEc2query_deserializeOpModifyTransitGateway struct { } -func (*awsEc2query_deserializeOpModifyVpcEndpoint) ID() string { +func (*awsEc2query_deserializeOpModifyTransitGateway) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTransitGateway) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err 
= next.HandleDeserialize(ctx, in) @@ -55618,9 +55465,9 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpoint) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpoint(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGateway(response, &metadata) } - output := &ModifyVpcEndpointOutput{} + output := &ModifyTransitGatewayOutput{} out.Result = output var buff [1024]byte @@ -55641,7 +55488,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpoint) HandleDeserialize(ctx conte } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcEndpointOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyTransitGatewayOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55655,7 +55502,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpoint) HandleDeserialize(ctx conte return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTransitGateway(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55688,14 +55535,14 @@ func awsEc2query_deserializeOpErrorModifyVpcEndpoint(response *smithyhttp.Respon } } -type awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification struct { +type awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference struct { } -func (*awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) ID() string { +func (*awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTransitGatewayPrefixListReference) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55713,9 +55560,9 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointConnectionNotification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGatewayPrefixListReference(response, &metadata) } - output := &ModifyVpcEndpointConnectionNotificationOutput{} + output := &ModifyTransitGatewayPrefixListReferenceOutput{} out.Result = output var buff [1024]byte @@ -55736,7 +55583,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcEndpointConnectionNotificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyTransitGatewayPrefixListReferenceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55750,7 +55597,7 @@ func (m 
*awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcEndpointConnectionNotification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTransitGatewayPrefixListReference(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55783,14 +55630,14 @@ func awsEc2query_deserializeOpErrorModifyVpcEndpointConnectionNotification(respo } } -type awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration struct { +type awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment struct { } -func (*awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) ID() string { +func (*awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55808,9 +55655,9 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServiceConfiguration(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyTransitGatewayVpcAttachment(response, &metadata) } - output := &ModifyVpcEndpointServiceConfigurationOutput{} + output := &ModifyTransitGatewayVpcAttachmentOutput{} out.Result = output var buff [1024]byte @@ -55831,7 +55678,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcEndpointServiceConfigurationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyTransitGatewayVpcAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55845,7 +55692,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcEndpointServiceConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55878,14 +55725,14 @@ func awsEc2query_deserializeOpErrorModifyVpcEndpointServiceConfiguration(respons } } -type awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility struct { +type awsEc2query_deserializeOpModifyVerifiedAccessEndpoint struct { } -func (*awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) ID() string { +func 
(*awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55903,9 +55750,9 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServicePayerResponsibility(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpoint(response, &metadata) } - output := &ModifyVpcEndpointServicePayerResponsibilityOutput{} + output := &ModifyVerifiedAccessEndpointOutput{} out.Result = output var buff [1024]byte @@ -55926,7 +55773,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcEndpointServicePayerResponsibilityOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -55940,7 +55787,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) H return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePayerResponsibility(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -55973,14 +55820,14 @@ func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePayerResponsibility(r } } -type awsEc2query_deserializeOpModifyVpcEndpointServicePermissions struct { +type awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy struct { } -func (*awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -55998,9 +55845,9 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServicePermissions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpointPolicy(response, &metadata) } - output := 
&ModifyVpcEndpointServicePermissionsOutput{} + output := &ModifyVerifiedAccessEndpointPolicyOutput{} out.Result = output var buff [1024]byte @@ -56021,7 +55868,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) HandleDes } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcEndpointServicePermissionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessEndpointPolicyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56035,7 +55882,7 @@ func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) HandleDes return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56068,14 +55915,14 @@ func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePermissions(response } } -type awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions struct { +type awsEc2query_deserializeOpModifyVerifiedAccessGroup struct { } -func (*awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessGroup) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56093,9 +55940,9 @@ func (m *awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcPeeringConnectionOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessGroup(response, &metadata) } - output := &ModifyVpcPeeringConnectionOptionsOutput{} + output := &ModifyVerifiedAccessGroupOutput{} out.Result = output var buff [1024]byte @@ -56116,7 +55963,7 @@ func (m *awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcPeeringConnectionOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56130,7 +55977,7 @@ func (m *awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcPeeringConnectionOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { 
return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56163,14 +56010,14 @@ func awsEc2query_deserializeOpErrorModifyVpcPeeringConnectionOptions(response *s } } -type awsEc2query_deserializeOpModifyVpcTenancy struct { +type awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy struct { } -func (*awsEc2query_deserializeOpModifyVpcTenancy) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpcTenancy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessGroupPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56188,9 +56035,9 @@ func (m *awsEc2query_deserializeOpModifyVpcTenancy) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpcTenancy(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessGroupPolicy(response, &metadata) } - output := &ModifyVpcTenancyOutput{} + output := &ModifyVerifiedAccessGroupPolicyOutput{} out.Result = output var buff [1024]byte @@ -56211,7 +56058,7 @@ func (m *awsEc2query_deserializeOpModifyVpcTenancy) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpcTenancyOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessGroupPolicyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56225,7 +56072,7 @@ func (m *awsEc2query_deserializeOpModifyVpcTenancy) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpcTenancy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessGroupPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56258,14 +56105,14 @@ func awsEc2query_deserializeOpErrorModifyVpcTenancy(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpModifyVpnConnection struct { +type awsEc2query_deserializeOpModifyVerifiedAccessInstance struct { } -func (*awsEc2query_deserializeOpModifyVpnConnection) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessInstance) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56283,9 +56130,9 @@ func (m *awsEc2query_deserializeOpModifyVpnConnection) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - 
return out, metadata, awsEc2query_deserializeOpErrorModifyVpnConnection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessInstance(response, &metadata) } - output := &ModifyVpnConnectionOutput{} + output := &ModifyVerifiedAccessInstanceOutput{} out.Result = output var buff [1024]byte @@ -56306,7 +56153,7 @@ func (m *awsEc2query_deserializeOpModifyVpnConnection) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpnConnectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessInstanceOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56320,7 +56167,7 @@ func (m *awsEc2query_deserializeOpModifyVpnConnection) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56353,14 +56200,14 @@ func awsEc2query_deserializeOpErrorModifyVpnConnection(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpModifyVpnConnectionOptions struct { +type awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration struct { } -func (*awsEc2query_deserializeOpModifyVpnConnectionOptions) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpnConnectionOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessInstanceLoggingConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56378,9 +56225,9 @@ func (m *awsEc2query_deserializeOpModifyVpnConnectionOptions) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpnConnectionOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessInstanceLoggingConfiguration(response, &metadata) } - output := &ModifyVpnConnectionOptionsOutput{} + output := &ModifyVerifiedAccessInstanceLoggingConfigurationOutput{} out.Result = output var buff [1024]byte @@ -56401,7 +56248,7 @@ func (m *awsEc2query_deserializeOpModifyVpnConnectionOptions) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpnConnectionOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessInstanceLoggingConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56415,7 +56262,7 @@ func (m *awsEc2query_deserializeOpModifyVpnConnectionOptions) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpnConnectionOptions(response *smithyhttp.Response, metadata *middleware.Metadata) 
error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessInstanceLoggingConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56448,14 +56295,14 @@ func awsEc2query_deserializeOpErrorModifyVpnConnectionOptions(response *smithyht } } -type awsEc2query_deserializeOpModifyVpnTunnelCertificate struct { +type awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider struct { } -func (*awsEc2query_deserializeOpModifyVpnTunnelCertificate) ID() string { +func (*awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpnTunnelCertificate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVerifiedAccessTrustProvider) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56473,9 +56320,9 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelCertificate) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpnTunnelCertificate(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVerifiedAccessTrustProvider(response, &metadata) } - output := &ModifyVpnTunnelCertificateOutput{} + output := &ModifyVerifiedAccessTrustProviderOutput{} out.Result = output var buff [1024]byte @@ -56496,7 +56343,7 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelCertificate) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpnTunnelCertificateOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVerifiedAccessTrustProviderOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56510,7 +56357,7 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelCertificate) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpnTunnelCertificate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVerifiedAccessTrustProvider(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56543,14 +56390,14 @@ func awsEc2query_deserializeOpErrorModifyVpnTunnelCertificate(response *smithyht } } -type awsEc2query_deserializeOpModifyVpnTunnelOptions struct { +type awsEc2query_deserializeOpModifyVolume struct { } -func (*awsEc2query_deserializeOpModifyVpnTunnelOptions) ID() string { +func (*awsEc2query_deserializeOpModifyVolume) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpModifyVpnTunnelOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVolume) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56568,9 +56415,9 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelOptions) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorModifyVpnTunnelOptions(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVolume(response, &metadata) } - output := &ModifyVpnTunnelOptionsOutput{} + output := &ModifyVolumeOutput{} out.Result = output var buff [1024]byte @@ -56591,7 +56438,7 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelOptions) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentModifyVpnTunnelOptionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVolumeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56605,7 +56452,7 @@ func (m *awsEc2query_deserializeOpModifyVpnTunnelOptions) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorModifyVpnTunnelOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVolume(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56638,14 +56485,14 @@ func awsEc2query_deserializeOpErrorModifyVpnTunnelOptions(response *smithyhttp.R } } -type awsEc2query_deserializeOpMonitorInstances struct { +type awsEc2query_deserializeOpModifyVolumeAttribute struct { } -func (*awsEc2query_deserializeOpMonitorInstances) ID() string { +func (*awsEc2query_deserializeOpModifyVolumeAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpMonitorInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVolumeAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56663,44 +56510,21 @@ func (m *awsEc2query_deserializeOpMonitorInstances) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorMonitorInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVolumeAttribute(response, &metadata) } - output := &MonitorInstancesOutput{} + output := &ModifyVolumeAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsEc2query_deserializeOpDocumentMonitorInstancesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorMonitorInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVolumeAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56733,14 +56557,14 @@ func awsEc2query_deserializeOpErrorMonitorInstances(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpMoveAddressToVpc struct { +type awsEc2query_deserializeOpModifyVpcAttribute struct { } -func (*awsEc2query_deserializeOpMoveAddressToVpc) ID() string { +func (*awsEc2query_deserializeOpModifyVpcAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpMoveAddressToVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56758,44 +56582,21 @@ func (m *awsEc2query_deserializeOpMoveAddressToVpc) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorMoveAddressToVpc(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcAttribute(response, &metadata) } - output := &MoveAddressToVpcOutput{} + output := &ModifyVpcAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentMoveAddressToVpcOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorMoveAddressToVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56828,14 +56629,14 @@ func awsEc2query_deserializeOpErrorMoveAddressToVpc(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpMoveByoipCidrToIpam struct { +type awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion struct { } -func (*awsEc2query_deserializeOpMoveByoipCidrToIpam) ID() string { +func (*awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpMoveByoipCidrToIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessExclusion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56853,9 +56654,9 @@ func (m *awsEc2query_deserializeOpMoveByoipCidrToIpam) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorMoveByoipCidrToIpam(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessExclusion(response, &metadata) } - output := &MoveByoipCidrToIpamOutput{} + output := &ModifyVpcBlockPublicAccessExclusionOutput{} out.Result = output var buff [1024]byte @@ -56876,7 +56677,7 @@ func (m *awsEc2query_deserializeOpMoveByoipCidrToIpam) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentMoveByoipCidrToIpamOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcBlockPublicAccessExclusionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56890,7 +56691,7 @@ func (m *awsEc2query_deserializeOpMoveByoipCidrToIpam) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorMoveByoipCidrToIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessExclusion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -56923,14 +56724,14 @@ func awsEc2query_deserializeOpErrorMoveByoipCidrToIpam(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpMoveCapacityReservationInstances struct { +type awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions struct { } -func (*awsEc2query_deserializeOpMoveCapacityReservationInstances) ID() string { +func (*awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpMoveCapacityReservationInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcBlockPublicAccessOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -56948,9 +56749,9 @@ func (m 
*awsEc2query_deserializeOpMoveCapacityReservationInstances) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorMoveCapacityReservationInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessOptions(response, &metadata) } - output := &MoveCapacityReservationInstancesOutput{} + output := &ModifyVpcBlockPublicAccessOptionsOutput{} out.Result = output var buff [1024]byte @@ -56971,7 +56772,7 @@ func (m *awsEc2query_deserializeOpMoveCapacityReservationInstances) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentMoveCapacityReservationInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcBlockPublicAccessOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -56985,7 +56786,7 @@ func (m *awsEc2query_deserializeOpMoveCapacityReservationInstances) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorMoveCapacityReservationInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcBlockPublicAccessOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57018,14 +56819,14 @@ func awsEc2query_deserializeOpErrorMoveCapacityReservationInstances(response *sm } } -type awsEc2query_deserializeOpProvisionByoipCidr struct { +type awsEc2query_deserializeOpModifyVpcEndpoint struct { } -func (*awsEc2query_deserializeOpProvisionByoipCidr) ID() string { +func (*awsEc2query_deserializeOpModifyVpcEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpProvisionByoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57043,9 +56844,9 @@ func (m *awsEc2query_deserializeOpProvisionByoipCidr) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorProvisionByoipCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpoint(response, &metadata) } - output := &ProvisionByoipCidrOutput{} + output := &ModifyVpcEndpointOutput{} out.Result = output var buff [1024]byte @@ -57066,7 +56867,7 @@ func (m *awsEc2query_deserializeOpProvisionByoipCidr) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentProvisionByoipCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcEndpointOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57080,7 +56881,7 @@ func (m *awsEc2query_deserializeOpProvisionByoipCidr) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorProvisionByoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorModifyVpcEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57113,14 +56914,14 @@ func awsEc2query_deserializeOpErrorProvisionByoipCidr(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpProvisionIpamByoasn struct { +type awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification struct { } -func (*awsEc2query_deserializeOpProvisionIpamByoasn) ID() string { +func (*awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpProvisionIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcEndpointConnectionNotification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57138,9 +56939,9 @@ func (m *awsEc2query_deserializeOpProvisionIpamByoasn) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorProvisionIpamByoasn(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointConnectionNotification(response, &metadata) } - output := &ProvisionIpamByoasnOutput{} + output := &ModifyVpcEndpointConnectionNotificationOutput{} out.Result = output var buff [1024]byte @@ -57161,7 +56962,7 @@ func (m *awsEc2query_deserializeOpProvisionIpamByoasn) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentProvisionIpamByoasnOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcEndpointConnectionNotificationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57175,7 +56976,7 @@ func (m *awsEc2query_deserializeOpProvisionIpamByoasn) HandleDeserialize(ctx con return out, metadata, err } -func awsEc2query_deserializeOpErrorProvisionIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcEndpointConnectionNotification(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57208,14 +57009,14 @@ func awsEc2query_deserializeOpErrorProvisionIpamByoasn(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpProvisionIpamPoolCidr struct { +type awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration struct { } -func (*awsEc2query_deserializeOpProvisionIpamPoolCidr) ID() string { +func (*awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpProvisionIpamPoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcEndpointServiceConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57233,9 +57034,9 @@ func (m *awsEc2query_deserializeOpProvisionIpamPoolCidr) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorProvisionIpamPoolCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServiceConfiguration(response, &metadata) } - output := &ProvisionIpamPoolCidrOutput{} + output := &ModifyVpcEndpointServiceConfigurationOutput{} out.Result = output var buff [1024]byte @@ -57256,7 +57057,7 @@ func (m *awsEc2query_deserializeOpProvisionIpamPoolCidr) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentProvisionIpamPoolCidrOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcEndpointServiceConfigurationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57270,7 +57071,7 @@ func (m *awsEc2query_deserializeOpProvisionIpamPoolCidr) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorProvisionIpamPoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcEndpointServiceConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57303,14 +57104,14 @@ func awsEc2query_deserializeOpErrorProvisionIpamPoolCidr(response *smithyhttp.Re } } -type awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr struct { +type awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility struct { } -func (*awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) ID() string { +func (*awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePayerResponsibility) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57328,9 +57129,9 @@ func (m *awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorProvisionPublicIpv4PoolCidr(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServicePayerResponsibility(response, &metadata) } - output := &ProvisionPublicIpv4PoolCidrOutput{} + output := &ModifyVpcEndpointServicePayerResponsibilityOutput{} out.Result = output var buff [1024]byte @@ -57351,7 +57152,7 @@ func (m *awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) HandleDeserialize } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentProvisionPublicIpv4PoolCidrOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentModifyVpcEndpointServicePayerResponsibilityOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57365,7 +57166,7 @@ func (m *awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) HandleDeserialize return out, metadata, err } -func awsEc2query_deserializeOpErrorProvisionPublicIpv4PoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePayerResponsibility(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57398,14 +57199,14 @@ func awsEc2query_deserializeOpErrorProvisionPublicIpv4PoolCidr(response *smithyh } } -type awsEc2query_deserializeOpPurchaseCapacityBlock struct { +type awsEc2query_deserializeOpModifyVpcEndpointServicePermissions struct { } -func (*awsEc2query_deserializeOpPurchaseCapacityBlock) ID() string { +func (*awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpPurchaseCapacityBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcEndpointServicePermissions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57423,9 +57224,9 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlock) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorPurchaseCapacityBlock(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcEndpointServicePermissions(response, &metadata) } - output := &PurchaseCapacityBlockOutput{} + output := &ModifyVpcEndpointServicePermissionsOutput{} out.Result = output var buff [1024]byte @@ -57446,7 +57247,7 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlock) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentPurchaseCapacityBlockOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcEndpointServicePermissionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57460,7 +57261,7 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlock) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorPurchaseCapacityBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcEndpointServicePermissions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57493,14 +57294,14 @@ func awsEc2query_deserializeOpErrorPurchaseCapacityBlock(response *smithyhttp.Re } } -type awsEc2query_deserializeOpPurchaseCapacityBlockExtension struct { +type awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions struct { } -func 
(*awsEc2query_deserializeOpPurchaseCapacityBlockExtension) ID() string { +func (*awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpPurchaseCapacityBlockExtension) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcPeeringConnectionOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57518,9 +57319,9 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlockExtension) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorPurchaseCapacityBlockExtension(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcPeeringConnectionOptions(response, &metadata) } - output := &PurchaseCapacityBlockExtensionOutput{} + output := &ModifyVpcPeeringConnectionOptionsOutput{} out.Result = output var buff [1024]byte @@ -57541,7 +57342,7 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlockExtension) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentPurchaseCapacityBlockExtensionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcPeeringConnectionOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57555,7 +57356,7 @@ func (m *awsEc2query_deserializeOpPurchaseCapacityBlockExtension) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorPurchaseCapacityBlockExtension(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcPeeringConnectionOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57588,14 +57389,14 @@ func awsEc2query_deserializeOpErrorPurchaseCapacityBlockExtension(response *smit } } -type awsEc2query_deserializeOpPurchaseHostReservation struct { +type awsEc2query_deserializeOpModifyVpcTenancy struct { } -func (*awsEc2query_deserializeOpPurchaseHostReservation) ID() string { +func (*awsEc2query_deserializeOpModifyVpcTenancy) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpPurchaseHostReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpcTenancy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57613,9 +57414,9 @@ func (m *awsEc2query_deserializeOpPurchaseHostReservation) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorPurchaseHostReservation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpcTenancy(response, &metadata) } - output := &PurchaseHostReservationOutput{} + output := &ModifyVpcTenancyOutput{} 
out.Result = output var buff [1024]byte @@ -57636,7 +57437,7 @@ func (m *awsEc2query_deserializeOpPurchaseHostReservation) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentPurchaseHostReservationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpcTenancyOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57650,7 +57451,7 @@ func (m *awsEc2query_deserializeOpPurchaseHostReservation) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorPurchaseHostReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpcTenancy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57683,14 +57484,14 @@ func awsEc2query_deserializeOpErrorPurchaseHostReservation(response *smithyhttp. } } -type awsEc2query_deserializeOpPurchaseReservedInstancesOffering struct { +type awsEc2query_deserializeOpModifyVpnConnection struct { } -func (*awsEc2query_deserializeOpPurchaseReservedInstancesOffering) ID() string { +func (*awsEc2query_deserializeOpModifyVpnConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpPurchaseReservedInstancesOffering) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpnConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57708,9 +57509,9 @@ func (m *awsEc2query_deserializeOpPurchaseReservedInstancesOffering) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorPurchaseReservedInstancesOffering(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpnConnection(response, &metadata) } - output := &PurchaseReservedInstancesOfferingOutput{} + output := &ModifyVpnConnectionOutput{} out.Result = output var buff [1024]byte @@ -57731,7 +57532,7 @@ func (m *awsEc2query_deserializeOpPurchaseReservedInstancesOffering) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentPurchaseReservedInstancesOfferingOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpnConnectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57745,7 +57546,7 @@ func (m *awsEc2query_deserializeOpPurchaseReservedInstancesOffering) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorPurchaseReservedInstancesOffering(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpnConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57778,14 +57579,14 @@ func awsEc2query_deserializeOpErrorPurchaseReservedInstancesOffering(response *s 
} } -type awsEc2query_deserializeOpPurchaseScheduledInstances struct { +type awsEc2query_deserializeOpModifyVpnConnectionOptions struct { } -func (*awsEc2query_deserializeOpPurchaseScheduledInstances) ID() string { +func (*awsEc2query_deserializeOpModifyVpnConnectionOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpPurchaseScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpnConnectionOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57803,9 +57604,9 @@ func (m *awsEc2query_deserializeOpPurchaseScheduledInstances) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorPurchaseScheduledInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpnConnectionOptions(response, &metadata) } - output := &PurchaseScheduledInstancesOutput{} + output := &ModifyVpnConnectionOptionsOutput{} out.Result = output var buff [1024]byte @@ -57826,7 +57627,7 @@ func (m *awsEc2query_deserializeOpPurchaseScheduledInstances) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentPurchaseScheduledInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpnConnectionOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -57840,79 +57641,7 @@ func (m *awsEc2query_deserializeOpPurchaseScheduledInstances) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorPurchaseScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) - if err != nil { - return err - } - awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsEc2query_deserializeOpRebootInstances struct { -} - -func (*awsEc2query_deserializeOpRebootInstances) ID() string { - return "OperationDeserializer" -} - -func (m *awsEc2query_deserializeOpRebootInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := 
out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRebootInstances(response, &metadata) - } - output := &RebootInstancesOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsEc2query_deserializeOpErrorRebootInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpnConnectionOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -57945,14 +57674,14 @@ func awsEc2query_deserializeOpErrorRebootInstances(response *smithyhttp.Response } } -type awsEc2query_deserializeOpRegisterImage struct { +type awsEc2query_deserializeOpModifyVpnTunnelCertificate struct { } -func (*awsEc2query_deserializeOpRegisterImage) ID() string { +func (*awsEc2query_deserializeOpModifyVpnTunnelCertificate) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRegisterImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpnTunnelCertificate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -57970,9 +57699,9 @@ func (m *awsEc2query_deserializeOpRegisterImage) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRegisterImage(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpnTunnelCertificate(response, &metadata) } - output := &RegisterImageOutput{} + output := &ModifyVpnTunnelCertificateOutput{} out.Result = output var buff [1024]byte @@ -57993,7 +57722,7 @@ func (m *awsEc2query_deserializeOpRegisterImage) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRegisterImageOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpnTunnelCertificateOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58007,7 +57736,7 @@ func (m *awsEc2query_deserializeOpRegisterImage) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorRegisterImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpnTunnelCertificate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58040,14 +57769,14 @@ func awsEc2query_deserializeOpErrorRegisterImage(response *smithyhttp.Response, } } -type 
awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes struct { +type awsEc2query_deserializeOpModifyVpnTunnelOptions struct { } -func (*awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) ID() string { +func (*awsEc2query_deserializeOpModifyVpnTunnelOptions) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpModifyVpnTunnelOptions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58065,9 +57794,9 @@ func (m *awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRegisterInstanceEventNotificationAttributes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorModifyVpnTunnelOptions(response, &metadata) } - output := &RegisterInstanceEventNotificationAttributesOutput{} + output := &ModifyVpnTunnelOptionsOutput{} out.Result = output var buff [1024]byte @@ -58088,7 +57817,7 @@ func (m *awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRegisterInstanceEventNotificationAttributesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentModifyVpnTunnelOptionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58102,7 +57831,7 @@ func (m *awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) H return out, metadata, err } -func awsEc2query_deserializeOpErrorRegisterInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorModifyVpnTunnelOptions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58135,14 +57864,14 @@ func awsEc2query_deserializeOpErrorRegisterInstanceEventNotificationAttributes(r } } -type awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers struct { +type awsEc2query_deserializeOpMonitorInstances struct { } -func (*awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) ID() string { +func (*awsEc2query_deserializeOpMonitorInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpMonitorInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58160,9 +57889,9 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupMembers(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorMonitorInstances(response, &metadata) } - output := &RegisterTransitGatewayMulticastGroupMembersOutput{} + output := &MonitorInstancesOutput{} out.Result = output var buff [1024]byte @@ -58183,7 +57912,7 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRegisterTransitGatewayMulticastGroupMembersOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentMonitorInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58197,7 +57926,7 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) H return out, metadata, err } -func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupMembers(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorMonitorInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58230,14 +57959,14 @@ func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupMembers(r } } -type awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources struct { +type awsEc2query_deserializeOpMoveAddressToVpc struct { } -func (*awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) ID() string { +func (*awsEc2query_deserializeOpMoveAddressToVpc) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpMoveAddressToVpc) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58255,9 +57984,9 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupSources(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorMoveAddressToVpc(response, &metadata) } - output := &RegisterTransitGatewayMulticastGroupSourcesOutput{} + output := &MoveAddressToVpcOutput{} out.Result = output var buff [1024]byte @@ -58278,7 +58007,7 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRegisterTransitGatewayMulticastGroupSourcesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentMoveAddressToVpcOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58292,7 +58021,7 @@ func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) H return out, metadata, err } -func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorMoveAddressToVpc(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58325,14 +58054,14 @@ func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupSources(r } } -type awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership struct { +type awsEc2query_deserializeOpMoveByoipCidrToIpam struct { } -func (*awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) ID() string { +func (*awsEc2query_deserializeOpMoveByoipCidrToIpam) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpMoveByoipCidrToIpam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58350,9 +58079,9 @@ func (m *awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectCapacityReservationBillingOwnership(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorMoveByoipCidrToIpam(response, &metadata) } - output := &RejectCapacityReservationBillingOwnershipOutput{} + output := &MoveByoipCidrToIpamOutput{} out.Result = output var buff [1024]byte @@ -58373,7 +58102,7 @@ func (m *awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) Han } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectCapacityReservationBillingOwnershipOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentMoveByoipCidrToIpamOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58387,7 +58116,7 @@ func (m *awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) Han return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectCapacityReservationBillingOwnership(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorMoveByoipCidrToIpam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58420,14 +58149,14 @@ func awsEc2query_deserializeOpErrorRejectCapacityReservationBillingOwnership(res } } -type awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations struct { +type awsEc2query_deserializeOpMoveCapacityReservationInstances struct { } -func (*awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations) ID() string { +func (*awsEc2query_deserializeOpMoveCapacityReservationInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpMoveCapacityReservationInstances) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58445,9 +58174,9 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociation } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayMulticastDomainAssociations(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorMoveCapacityReservationInstances(response, &metadata) } - output := &RejectTransitGatewayMulticastDomainAssociationsOutput{} + output := &MoveCapacityReservationInstancesOutput{} out.Result = output var buff [1024]byte @@ -58468,7 +58197,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociation } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectTransitGatewayMulticastDomainAssociationsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentMoveCapacityReservationInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58482,7 +58211,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociation return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectTransitGatewayMulticastDomainAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorMoveCapacityReservationInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58515,14 +58244,14 @@ func awsEc2query_deserializeOpErrorRejectTransitGatewayMulticastDomainAssociatio } } -type awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment struct { +type awsEc2query_deserializeOpProvisionByoipCidr struct { } -func (*awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) ID() string { +func (*awsEc2query_deserializeOpProvisionByoipCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpProvisionByoipCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58540,9 +58269,9 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) HandleD } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayPeeringAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorProvisionByoipCidr(response, &metadata) } - output := &RejectTransitGatewayPeeringAttachmentOutput{} + output := &ProvisionByoipCidrOutput{} out.Result = output var buff [1024]byte @@ -58563,7 +58292,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) HandleD } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectTransitGatewayPeeringAttachmentOutput(&output, 
decoder) + err = awsEc2query_deserializeOpDocumentProvisionByoipCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58577,7 +58306,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) HandleD return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorProvisionByoipCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58610,14 +58339,14 @@ func awsEc2query_deserializeOpErrorRejectTransitGatewayPeeringAttachment(respons } } -type awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment struct { +type awsEc2query_deserializeOpProvisionIpamByoasn struct { } -func (*awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_deserializeOpProvisionIpamByoasn) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpProvisionIpamByoasn) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58635,9 +58364,9 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayVpcAttachment(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorProvisionIpamByoasn(response, &metadata) } - output := &RejectTransitGatewayVpcAttachmentOutput{} + output := &ProvisionIpamByoasnOutput{} out.Result = output var buff [1024]byte @@ -58658,7 +58387,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectTransitGatewayVpcAttachmentOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentProvisionIpamByoasnOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58672,7 +58401,7 @@ func (m *awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) HandleDeser return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorProvisionIpamByoasn(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58705,14 +58434,14 @@ func awsEc2query_deserializeOpErrorRejectTransitGatewayVpcAttachment(response *s } } -type awsEc2query_deserializeOpRejectVpcEndpointConnections struct { +type awsEc2query_deserializeOpProvisionIpamPoolCidr struct { } -func (*awsEc2query_deserializeOpRejectVpcEndpointConnections) ID() string { +func 
(*awsEc2query_deserializeOpProvisionIpamPoolCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectVpcEndpointConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpProvisionIpamPoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58730,9 +58459,9 @@ func (m *awsEc2query_deserializeOpRejectVpcEndpointConnections) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectVpcEndpointConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorProvisionIpamPoolCidr(response, &metadata) } - output := &RejectVpcEndpointConnectionsOutput{} + output := &ProvisionIpamPoolCidrOutput{} out.Result = output var buff [1024]byte @@ -58753,7 +58482,7 @@ func (m *awsEc2query_deserializeOpRejectVpcEndpointConnections) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectVpcEndpointConnectionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentProvisionIpamPoolCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58767,7 +58496,7 @@ func (m *awsEc2query_deserializeOpRejectVpcEndpointConnections) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectVpcEndpointConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorProvisionIpamPoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58800,14 +58529,14 @@ func awsEc2query_deserializeOpErrorRejectVpcEndpointConnections(response *smithy } } -type awsEc2query_deserializeOpRejectVpcPeeringConnection struct { +type awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr struct { } -func (*awsEc2query_deserializeOpRejectVpcPeeringConnection) ID() string { +func (*awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRejectVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpProvisionPublicIpv4PoolCidr) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58825,9 +58554,9 @@ func (m *awsEc2query_deserializeOpRejectVpcPeeringConnection) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRejectVpcPeeringConnection(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorProvisionPublicIpv4PoolCidr(response, &metadata) } - output := &RejectVpcPeeringConnectionOutput{} + output := &ProvisionPublicIpv4PoolCidrOutput{} out.Result = output var buff [1024]byte @@ -58848,7 +58577,7 @@ func (m 
*awsEc2query_deserializeOpRejectVpcPeeringConnection) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRejectVpcPeeringConnectionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentProvisionPublicIpv4PoolCidrOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -58862,7 +58591,7 @@ func (m *awsEc2query_deserializeOpRejectVpcPeeringConnection) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorRejectVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorProvisionPublicIpv4PoolCidr(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58895,14 +58624,14 @@ func awsEc2query_deserializeOpErrorRejectVpcPeeringConnection(response *smithyht } } -type awsEc2query_deserializeOpReleaseAddress struct { +type awsEc2query_deserializeOpPurchaseCapacityBlock struct { } -func (*awsEc2query_deserializeOpReleaseAddress) ID() string { +func (*awsEc2query_deserializeOpPurchaseCapacityBlock) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReleaseAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpPurchaseCapacityBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58920,21 +58649,44 @@ func (m *awsEc2query_deserializeOpReleaseAddress) HandleDeserialize(ctx context. 
} if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReleaseAddress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorPurchaseCapacityBlock(response, &metadata) } - output := &ReleaseAddressOutput{} + output := &PurchaseCapacityBlockOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentPurchaseCapacityBlockOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorReleaseAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorPurchaseCapacityBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -58967,14 +58719,14 @@ func awsEc2query_deserializeOpErrorReleaseAddress(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpReleaseHosts struct { +type awsEc2query_deserializeOpPurchaseCapacityBlockExtension struct { } -func (*awsEc2query_deserializeOpReleaseHosts) ID() string { +func (*awsEc2query_deserializeOpPurchaseCapacityBlockExtension) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReleaseHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpPurchaseCapacityBlockExtension) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -58992,9 +58744,9 @@ func (m *awsEc2query_deserializeOpReleaseHosts) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReleaseHosts(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorPurchaseCapacityBlockExtension(response, &metadata) } - output := &ReleaseHostsOutput{} + output := &PurchaseCapacityBlockExtensionOutput{} out.Result = output var buff [1024]byte @@ -59015,7 +58767,7 @@ func (m *awsEc2query_deserializeOpReleaseHosts) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReleaseHostsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentPurchaseCapacityBlockExtensionOutput(&output, decoder) if err != nil { var 
snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59029,7 +58781,7 @@ func (m *awsEc2query_deserializeOpReleaseHosts) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorReleaseHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorPurchaseCapacityBlockExtension(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59062,14 +58814,14 @@ func awsEc2query_deserializeOpErrorReleaseHosts(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpReleaseIpamPoolAllocation struct { +type awsEc2query_deserializeOpPurchaseHostReservation struct { } -func (*awsEc2query_deserializeOpReleaseIpamPoolAllocation) ID() string { +func (*awsEc2query_deserializeOpPurchaseHostReservation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReleaseIpamPoolAllocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpPurchaseHostReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59087,9 +58839,9 @@ func (m *awsEc2query_deserializeOpReleaseIpamPoolAllocation) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReleaseIpamPoolAllocation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorPurchaseHostReservation(response, &metadata) } - output := &ReleaseIpamPoolAllocationOutput{} + output := &PurchaseHostReservationOutput{} out.Result = output var buff [1024]byte @@ -59110,7 +58862,7 @@ func (m *awsEc2query_deserializeOpReleaseIpamPoolAllocation) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReleaseIpamPoolAllocationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentPurchaseHostReservationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59124,7 +58876,7 @@ func (m *awsEc2query_deserializeOpReleaseIpamPoolAllocation) HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorReleaseIpamPoolAllocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorPurchaseHostReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59157,14 +58909,14 @@ func awsEc2query_deserializeOpErrorReleaseIpamPoolAllocation(response *smithyhtt } } -type awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation struct { +type awsEc2query_deserializeOpPurchaseReservedInstancesOffering struct { } -func (*awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) ID() string { +func (*awsEc2query_deserializeOpPurchaseReservedInstancesOffering) ID() string { return "OperationDeserializer" } -func (m 
*awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpPurchaseReservedInstancesOffering) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59182,9 +58934,9 @@ func (m *awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) HandleDe } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceIamInstanceProfileAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorPurchaseReservedInstancesOffering(response, &metadata) } - output := &ReplaceIamInstanceProfileAssociationOutput{} + output := &PurchaseReservedInstancesOfferingOutput{} out.Result = output var buff [1024]byte @@ -59205,7 +58957,7 @@ func (m *awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) HandleDe } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceIamInstanceProfileAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentPurchaseReservedInstancesOfferingOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59219,7 +58971,7 @@ func (m *awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) HandleDe return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceIamInstanceProfileAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorPurchaseReservedInstancesOffering(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59252,14 +59004,14 @@ func awsEc2query_deserializeOpErrorReplaceIamInstanceProfileAssociation(response } } -type awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings struct { +type awsEc2query_deserializeOpPurchaseScheduledInstances struct { } -func (*awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) ID() string { +func (*awsEc2query_deserializeOpPurchaseScheduledInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpPurchaseScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59277,9 +59029,9 @@ func (m *awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceImageCriteriaInAllowedImagesSettings(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorPurchaseScheduledInstances(response, &metadata) } - output := &ReplaceImageCriteriaInAllowedImagesSettingsOutput{} + output := &PurchaseScheduledInstancesOutput{} out.Result = output 
var buff [1024]byte @@ -59300,7 +59052,7 @@ func (m *awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) H } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceImageCriteriaInAllowedImagesSettingsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentPurchaseScheduledInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59314,7 +59066,7 @@ func (m *awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) H return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceImageCriteriaInAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorPurchaseScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59347,14 +59099,14 @@ func awsEc2query_deserializeOpErrorReplaceImageCriteriaInAllowedImagesSettings(r } } -type awsEc2query_deserializeOpReplaceNetworkAclAssociation struct { +type awsEc2query_deserializeOpRebootInstances struct { } -func (*awsEc2query_deserializeOpReplaceNetworkAclAssociation) ID() string { +func (*awsEc2query_deserializeOpRebootInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceNetworkAclAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRebootInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59372,44 +59124,21 @@ func (m *awsEc2query_deserializeOpReplaceNetworkAclAssociation) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceNetworkAclAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRebootInstances(response, &metadata) } - output := &ReplaceNetworkAclAssociationOutput{} + output := &RebootInstancesOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceNetworkAclAssociationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceNetworkAclAssociation(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRebootInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59442,14 +59171,14 @@ func awsEc2query_deserializeOpErrorReplaceNetworkAclAssociation(response *smithy } } -type awsEc2query_deserializeOpReplaceNetworkAclEntry struct { +type awsEc2query_deserializeOpRegisterImage struct { } -func (*awsEc2query_deserializeOpReplaceNetworkAclEntry) ID() string { +func (*awsEc2query_deserializeOpRegisterImage) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRegisterImage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59467,21 +59196,44 @@ func (m *awsEc2query_deserializeOpReplaceNetworkAclEntry) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceNetworkAclEntry(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRegisterImage(response, &metadata) } - output := &ReplaceNetworkAclEntryOutput{} + output := &RegisterImageOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentRegisterImageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRegisterImage(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59514,14 +59266,14 @@ func awsEc2query_deserializeOpErrorReplaceNetworkAclEntry(response *smithyhttp.R } } -type awsEc2query_deserializeOpReplaceRoute struct { +type awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes struct { } -func (*awsEc2query_deserializeOpReplaceRoute) ID() string { +func (*awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) ID() 
string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRegisterInstanceEventNotificationAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59539,21 +59291,44 @@ func (m *awsEc2query_deserializeOpReplaceRoute) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRegisterInstanceEventNotificationAttributes(response, &metadata) } - output := &ReplaceRouteOutput{} + output := &RegisterInstanceEventNotificationAttributesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentRegisterInstanceEventNotificationAttributesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRegisterInstanceEventNotificationAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59586,14 +59361,14 @@ func awsEc2query_deserializeOpErrorReplaceRoute(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpReplaceRouteTableAssociation struct { +type awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers struct { } -func (*awsEc2query_deserializeOpReplaceRouteTableAssociation) ID() string { +func (*awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceRouteTableAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupMembers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59611,9 +59386,9 @@ func (m 
*awsEc2query_deserializeOpReplaceRouteTableAssociation) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceRouteTableAssociation(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupMembers(response, &metadata) } - output := &ReplaceRouteTableAssociationOutput{} + output := &RegisterTransitGatewayMulticastGroupMembersOutput{} out.Result = output var buff [1024]byte @@ -59634,7 +59409,7 @@ func (m *awsEc2query_deserializeOpReplaceRouteTableAssociation) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceRouteTableAssociationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRegisterTransitGatewayMulticastGroupMembersOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59648,7 +59423,7 @@ func (m *awsEc2query_deserializeOpReplaceRouteTableAssociation) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceRouteTableAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupMembers(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59681,14 +59456,14 @@ func awsEc2query_deserializeOpErrorReplaceRouteTableAssociation(response *smithy } } -type awsEc2query_deserializeOpReplaceTransitGatewayRoute struct { +type awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources struct { } -func (*awsEc2query_deserializeOpReplaceTransitGatewayRoute) ID() string { +func (*awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRegisterTransitGatewayMulticastGroupSources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59706,9 +59481,9 @@ func (m *awsEc2query_deserializeOpReplaceTransitGatewayRoute) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceTransitGatewayRoute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupSources(response, &metadata) } - output := &ReplaceTransitGatewayRouteOutput{} + output := &RegisterTransitGatewayMulticastGroupSourcesOutput{} out.Result = output var buff [1024]byte @@ -59729,7 +59504,7 @@ func (m *awsEc2query_deserializeOpReplaceTransitGatewayRoute) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceTransitGatewayRouteOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRegisterTransitGatewayMulticastGroupSourcesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59743,7 +59518,7 @@ func (m 
*awsEc2query_deserializeOpReplaceTransitGatewayRoute) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRegisterTransitGatewayMulticastGroupSources(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59776,14 +59551,14 @@ func awsEc2query_deserializeOpErrorReplaceTransitGatewayRoute(response *smithyht } } -type awsEc2query_deserializeOpReplaceVpnTunnel struct { +type awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership struct { } -func (*awsEc2query_deserializeOpReplaceVpnTunnel) ID() string { +func (*awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReplaceVpnTunnel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectCapacityReservationBillingOwnership) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59801,9 +59576,9 @@ func (m *awsEc2query_deserializeOpReplaceVpnTunnel) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReplaceVpnTunnel(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectCapacityReservationBillingOwnership(response, &metadata) } - output := &ReplaceVpnTunnelOutput{} + output := &RejectCapacityReservationBillingOwnershipOutput{} out.Result = output var buff [1024]byte @@ -59824,7 +59599,7 @@ func (m *awsEc2query_deserializeOpReplaceVpnTunnel) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentReplaceVpnTunnelOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRejectCapacityReservationBillingOwnershipOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -59838,7 +59613,7 @@ func (m *awsEc2query_deserializeOpReplaceVpnTunnel) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorReplaceVpnTunnel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRejectCapacityReservationBillingOwnership(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59871,14 +59646,14 @@ func awsEc2query_deserializeOpErrorReplaceVpnTunnel(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpReportInstanceStatus struct { +type awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations struct { } -func (*awsEc2query_deserializeOpReportInstanceStatus) ID() string { +func (*awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpReportInstanceStatus) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectTransitGatewayMulticastDomainAssociations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59896,21 +59671,44 @@ func (m *awsEc2query_deserializeOpReportInstanceStatus) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorReportInstanceStatus(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayMulticastDomainAssociations(response, &metadata) } - output := &ReportInstanceStatusOutput{} + output := &RejectTransitGatewayMulticastDomainAssociationsOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentRejectTransitGatewayMulticastDomainAssociationsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorReportInstanceStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRejectTransitGatewayMulticastDomainAssociations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -59943,14 +59741,14 @@ func awsEc2query_deserializeOpErrorReportInstanceStatus(response *smithyhttp.Res } } -type awsEc2query_deserializeOpRequestSpotFleet struct { +type awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment struct { } -func (*awsEc2query_deserializeOpRequestSpotFleet) ID() string { +func (*awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRequestSpotFleet) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectTransitGatewayPeeringAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -59968,9 +59766,9 @@ func (m *awsEc2query_deserializeOpRequestSpotFleet) HandleDeserialize(ctx contex } if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRequestSpotFleet(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayPeeringAttachment(response, &metadata) } - output := &RequestSpotFleetOutput{} + output := &RejectTransitGatewayPeeringAttachmentOutput{} out.Result = output var buff [1024]byte @@ -59991,7 +59789,7 @@ func (m *awsEc2query_deserializeOpRequestSpotFleet) HandleDeserialize(ctx contex } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRequestSpotFleetOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRejectTransitGatewayPeeringAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60005,7 +59803,7 @@ func (m *awsEc2query_deserializeOpRequestSpotFleet) HandleDeserialize(ctx contex return out, metadata, err } -func awsEc2query_deserializeOpErrorRequestSpotFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRejectTransitGatewayPeeringAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60038,14 +59836,14 @@ func awsEc2query_deserializeOpErrorRequestSpotFleet(response *smithyhttp.Respons } } -type awsEc2query_deserializeOpRequestSpotInstances struct { +type awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment struct { } -func (*awsEc2query_deserializeOpRequestSpotInstances) ID() string { +func (*awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRequestSpotInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectTransitGatewayVpcAttachment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60063,9 +59861,9 @@ func (m *awsEc2query_deserializeOpRequestSpotInstances) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRequestSpotInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectTransitGatewayVpcAttachment(response, &metadata) } - output := &RequestSpotInstancesOutput{} + output := &RejectTransitGatewayVpcAttachmentOutput{} out.Result = output var buff [1024]byte @@ -60086,7 +59884,7 @@ func (m *awsEc2query_deserializeOpRequestSpotInstances) HandleDeserialize(ctx co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRequestSpotInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRejectTransitGatewayVpcAttachmentOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60100,7 +59898,7 @@ func (m *awsEc2query_deserializeOpRequestSpotInstances) HandleDeserialize(ctx co return out, metadata, err } -func awsEc2query_deserializeOpErrorRequestSpotInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorRejectTransitGatewayVpcAttachment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60133,14 +59931,14 @@ func awsEc2query_deserializeOpErrorRequestSpotInstances(response *smithyhttp.Res } } -type awsEc2query_deserializeOpResetAddressAttribute struct { +type awsEc2query_deserializeOpRejectVpcEndpointConnections struct { } -func (*awsEc2query_deserializeOpResetAddressAttribute) ID() string { +func (*awsEc2query_deserializeOpRejectVpcEndpointConnections) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetAddressAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectVpcEndpointConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60158,9 +59956,9 @@ func (m *awsEc2query_deserializeOpResetAddressAttribute) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetAddressAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectVpcEndpointConnections(response, &metadata) } - output := &ResetAddressAttributeOutput{} + output := &RejectVpcEndpointConnectionsOutput{} out.Result = output var buff [1024]byte @@ -60181,7 +59979,7 @@ func (m *awsEc2query_deserializeOpResetAddressAttribute) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentResetAddressAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRejectVpcEndpointConnectionsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60195,7 +59993,7 @@ func (m *awsEc2query_deserializeOpResetAddressAttribute) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorResetAddressAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRejectVpcEndpointConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60228,14 +60026,14 @@ func awsEc2query_deserializeOpErrorResetAddressAttribute(response *smithyhttp.Re } } -type awsEc2query_deserializeOpResetEbsDefaultKmsKeyId struct { +type awsEc2query_deserializeOpRejectVpcPeeringConnection struct { } -func (*awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_deserializeOpRejectVpcPeeringConnection) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRejectVpcPeeringConnection) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60253,9 +60051,9 @@ func (m *awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetEbsDefaultKmsKeyId(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRejectVpcPeeringConnection(response, &metadata) } - output := &ResetEbsDefaultKmsKeyIdOutput{} + output := &RejectVpcPeeringConnectionOutput{} out.Result = output var buff [1024]byte @@ -60276,7 +60074,7 @@ func (m *awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentResetEbsDefaultKmsKeyIdOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRejectVpcPeeringConnectionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60290,7 +60088,7 @@ func (m *awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorResetEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRejectVpcPeeringConnection(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60323,14 +60121,14 @@ func awsEc2query_deserializeOpErrorResetEbsDefaultKmsKeyId(response *smithyhttp. } } -type awsEc2query_deserializeOpResetFpgaImageAttribute struct { +type awsEc2query_deserializeOpReleaseAddress struct { } -func (*awsEc2query_deserializeOpResetFpgaImageAttribute) ID() string { +func (*awsEc2query_deserializeOpReleaseAddress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReleaseAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60348,9 +60146,81 @@ func (m *awsEc2query_deserializeOpResetFpgaImageAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetFpgaImageAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReleaseAddress(response, &metadata) } - output := &ResetFpgaImageAttributeOutput{} + output := &ReleaseAddressOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorReleaseAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := 
"UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpReleaseHosts struct { +} + +func (*awsEc2query_deserializeOpReleaseHosts) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpReleaseHosts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorReleaseHosts(response, &metadata) + } + output := &ReleaseHostsOutput{} out.Result = output var buff [1024]byte @@ -60371,7 +60241,7 @@ func (m *awsEc2query_deserializeOpResetFpgaImageAttribute) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentResetFpgaImageAttributeOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentReleaseHostsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60385,7 +60255,7 @@ func (m *awsEc2query_deserializeOpResetFpgaImageAttribute) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorResetFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReleaseHosts(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60418,14 +60288,14 @@ func awsEc2query_deserializeOpErrorResetFpgaImageAttribute(response *smithyhttp. 
} } -type awsEc2query_deserializeOpResetImageAttribute struct { +type awsEc2query_deserializeOpReleaseIpamPoolAllocation struct { } -func (*awsEc2query_deserializeOpResetImageAttribute) ID() string { +func (*awsEc2query_deserializeOpReleaseIpamPoolAllocation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReleaseIpamPoolAllocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60443,21 +60313,44 @@ func (m *awsEc2query_deserializeOpResetImageAttribute) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetImageAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReleaseIpamPoolAllocation(response, &metadata) } - output := &ResetImageAttributeOutput{} + output := &ReleaseIpamPoolAllocationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentReleaseIpamPoolAllocationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorResetImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReleaseIpamPoolAllocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60490,14 +60383,14 @@ func awsEc2query_deserializeOpErrorResetImageAttribute(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpResetInstanceAttribute struct { +type awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation struct { } -func (*awsEc2query_deserializeOpResetInstanceAttribute) ID() string { +func (*awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceIamInstanceProfileAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60515,21 +60408,44 @@ func (m *awsEc2query_deserializeOpResetInstanceAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetInstanceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceIamInstanceProfileAssociation(response, &metadata) } - output := &ResetInstanceAttributeOutput{} + output := &ReplaceIamInstanceProfileAssociationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentReplaceIamInstanceProfileAssociationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorResetInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceIamInstanceProfileAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60562,14 +60478,14 @@ func awsEc2query_deserializeOpErrorResetInstanceAttribute(response *smithyhttp.R } } -type awsEc2query_deserializeOpResetNetworkInterfaceAttribute struct { +type awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings struct { } -func (*awsEc2query_deserializeOpResetNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceImageCriteriaInAllowedImagesSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60587,21 +60503,44 @@ func (m *awsEc2query_deserializeOpResetNetworkInterfaceAttribute) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetNetworkInterfaceAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceImageCriteriaInAllowedImagesSettings(response, &metadata) } - output := 
&ResetNetworkInterfaceAttributeOutput{} + output := &ReplaceImageCriteriaInAllowedImagesSettingsOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentReplaceImageCriteriaInAllowedImagesSettingsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorResetNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceImageCriteriaInAllowedImagesSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60634,14 +60573,14 @@ func awsEc2query_deserializeOpErrorResetNetworkInterfaceAttribute(response *smit } } -type awsEc2query_deserializeOpResetSnapshotAttribute struct { +type awsEc2query_deserializeOpReplaceNetworkAclAssociation struct { } -func (*awsEc2query_deserializeOpResetSnapshotAttribute) ID() string { +func (*awsEc2query_deserializeOpReplaceNetworkAclAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpResetSnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceNetworkAclAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60659,21 +60598,44 @@ func (m *awsEc2query_deserializeOpResetSnapshotAttribute) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorResetSnapshotAttribute(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceNetworkAclAssociation(response, &metadata) } - output := &ResetSnapshotAttributeOutput{} + output := &ReplaceNetworkAclAssociationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard 
response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentReplaceNetworkAclAssociationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorResetSnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceNetworkAclAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60706,14 +60668,14 @@ func awsEc2query_deserializeOpErrorResetSnapshotAttribute(response *smithyhttp.R } } -type awsEc2query_deserializeOpRestoreAddressToClassic struct { +type awsEc2query_deserializeOpReplaceNetworkAclEntry struct { } -func (*awsEc2query_deserializeOpRestoreAddressToClassic) ID() string { +func (*awsEc2query_deserializeOpReplaceNetworkAclEntry) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRestoreAddressToClassic) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceNetworkAclEntry) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60731,44 +60693,93 @@ func (m *awsEc2query_deserializeOpRestoreAddressToClassic) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRestoreAddressToClassic(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceNetworkAclEntry(response, &metadata) } - output := &RestoreAddressToClassicOutput{} + output := &ReplaceNetworkAclEntryOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } } - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRestoreAddressToClassicOutput(&output, decoder) + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorReplaceNetworkAclEntry(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + 
errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } + return genericError + + } +} + +type awsEc2query_deserializeOpReplaceRoute struct { +} + +func (*awsEc2query_deserializeOpReplaceRoute) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpReplaceRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorReplaceRoute(response, &metadata) + } + output := &ReplaceRouteOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + return out, metadata, err } -func awsEc2query_deserializeOpErrorRestoreAddressToClassic(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60801,14 +60812,14 @@ func awsEc2query_deserializeOpErrorRestoreAddressToClassic(response *smithyhttp. 
} } -type awsEc2query_deserializeOpRestoreImageFromRecycleBin struct { +type awsEc2query_deserializeOpReplaceRouteTableAssociation struct { } -func (*awsEc2query_deserializeOpRestoreImageFromRecycleBin) ID() string { +func (*awsEc2query_deserializeOpReplaceRouteTableAssociation) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRestoreImageFromRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceRouteTableAssociation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60826,9 +60837,9 @@ func (m *awsEc2query_deserializeOpRestoreImageFromRecycleBin) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRestoreImageFromRecycleBin(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceRouteTableAssociation(response, &metadata) } - output := &RestoreImageFromRecycleBinOutput{} + output := &ReplaceRouteTableAssociationOutput{} out.Result = output var buff [1024]byte @@ -60849,7 +60860,7 @@ func (m *awsEc2query_deserializeOpRestoreImageFromRecycleBin) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRestoreImageFromRecycleBinOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentReplaceRouteTableAssociationOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60863,7 +60874,7 @@ func (m *awsEc2query_deserializeOpRestoreImageFromRecycleBin) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorRestoreImageFromRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceRouteTableAssociation(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60896,14 +60907,14 @@ func awsEc2query_deserializeOpErrorRestoreImageFromRecycleBin(response *smithyht } } -type awsEc2query_deserializeOpRestoreManagedPrefixListVersion struct { +type awsEc2query_deserializeOpReplaceTransitGatewayRoute struct { } -func (*awsEc2query_deserializeOpRestoreManagedPrefixListVersion) ID() string { +func (*awsEc2query_deserializeOpReplaceTransitGatewayRoute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRestoreManagedPrefixListVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceTransitGatewayRoute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -60921,9 +60932,9 @@ func (m *awsEc2query_deserializeOpRestoreManagedPrefixListVersion) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRestoreManagedPrefixListVersion(response, &metadata) + return out, metadata, 
awsEc2query_deserializeOpErrorReplaceTransitGatewayRoute(response, &metadata) } - output := &RestoreManagedPrefixListVersionOutput{} + output := &ReplaceTransitGatewayRouteOutput{} out.Result = output var buff [1024]byte @@ -60944,7 +60955,7 @@ func (m *awsEc2query_deserializeOpRestoreManagedPrefixListVersion) HandleDeseria } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRestoreManagedPrefixListVersionOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentReplaceTransitGatewayRouteOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -60958,7 +60969,7 @@ func (m *awsEc2query_deserializeOpRestoreManagedPrefixListVersion) HandleDeseria return out, metadata, err } -func awsEc2query_deserializeOpErrorRestoreManagedPrefixListVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceTransitGatewayRoute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -60991,14 +61002,14 @@ func awsEc2query_deserializeOpErrorRestoreManagedPrefixListVersion(response *smi } } -type awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin struct { +type awsEc2query_deserializeOpReplaceVpnTunnel struct { } -func (*awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) ID() string { +func (*awsEc2query_deserializeOpReplaceVpnTunnel) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReplaceVpnTunnel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61016,9 +61027,9 @@ func (m *awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRestoreSnapshotFromRecycleBin(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReplaceVpnTunnel(response, &metadata) } - output := &RestoreSnapshotFromRecycleBinOutput{} + output := &ReplaceVpnTunnelOutput{} out.Result = output var buff [1024]byte @@ -61039,7 +61050,7 @@ func (m *awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRestoreSnapshotFromRecycleBinOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentReplaceVpnTunnelOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61053,7 +61064,7 @@ func (m *awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorRestoreSnapshotFromRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReplaceVpnTunnel(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61086,14 +61097,14 @@ func awsEc2query_deserializeOpErrorRestoreSnapshotFromRecycleBin(response *smith } } -type awsEc2query_deserializeOpRestoreSnapshotTier struct { +type awsEc2query_deserializeOpReportInstanceStatus struct { } -func (*awsEc2query_deserializeOpRestoreSnapshotTier) ID() string { +func (*awsEc2query_deserializeOpReportInstanceStatus) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRestoreSnapshotTier) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpReportInstanceStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61111,44 +61122,21 @@ func (m *awsEc2query_deserializeOpRestoreSnapshotTier) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRestoreSnapshotTier(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorReportInstanceStatus(response, &metadata) } - output := &RestoreSnapshotTierOutput{} + output := &ReportInstanceStatusOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRestoreSnapshotTierOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorRestoreSnapshotTier(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorReportInstanceStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61181,14 +61169,14 @@ func awsEc2query_deserializeOpErrorRestoreSnapshotTier(response *smithyhttp.Resp } } -type awsEc2query_deserializeOpRevokeClientVpnIngress struct { +type awsEc2query_deserializeOpRequestSpotFleet struct { } -func (*awsEc2query_deserializeOpRevokeClientVpnIngress) ID() string { +func (*awsEc2query_deserializeOpRequestSpotFleet) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRevokeClientVpnIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRequestSpotFleet) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61206,9 +61194,9 @@ func (m *awsEc2query_deserializeOpRevokeClientVpnIngress) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRevokeClientVpnIngress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRequestSpotFleet(response, &metadata) } - output := &RevokeClientVpnIngressOutput{} + output := &RequestSpotFleetOutput{} out.Result = output var buff [1024]byte @@ -61229,7 +61217,7 @@ func (m *awsEc2query_deserializeOpRevokeClientVpnIngress) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRevokeClientVpnIngressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRequestSpotFleetOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61243,7 +61231,7 @@ func (m *awsEc2query_deserializeOpRevokeClientVpnIngress) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorRevokeClientVpnIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRequestSpotFleet(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61276,14 +61264,14 @@ func awsEc2query_deserializeOpErrorRevokeClientVpnIngress(response *smithyhttp.R } } -type awsEc2query_deserializeOpRevokeSecurityGroupEgress struct { +type awsEc2query_deserializeOpRequestSpotInstances struct { } -func (*awsEc2query_deserializeOpRevokeSecurityGroupEgress) ID() string { +func (*awsEc2query_deserializeOpRequestSpotInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRevokeSecurityGroupEgress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRequestSpotInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61301,9 +61289,9 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupEgress) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRevokeSecurityGroupEgress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRequestSpotInstances(response, &metadata) } - output := &RevokeSecurityGroupEgressOutput{} + output := &RequestSpotInstancesOutput{} out.Result = output var buff [1024]byte @@ -61324,7 +61312,7 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupEgress) HandleDeserialize(c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRevokeSecurityGroupEgressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRequestSpotInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61338,7 +61326,7 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupEgress) 
HandleDeserialize(c return out, metadata, err } -func awsEc2query_deserializeOpErrorRevokeSecurityGroupEgress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRequestSpotInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61371,14 +61359,14 @@ func awsEc2query_deserializeOpErrorRevokeSecurityGroupEgress(response *smithyhtt } } -type awsEc2query_deserializeOpRevokeSecurityGroupIngress struct { +type awsEc2query_deserializeOpResetAddressAttribute struct { } -func (*awsEc2query_deserializeOpRevokeSecurityGroupIngress) ID() string { +func (*awsEc2query_deserializeOpResetAddressAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRevokeSecurityGroupIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetAddressAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61396,9 +61384,9 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupIngress) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRevokeSecurityGroupIngress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetAddressAttribute(response, &metadata) } - output := &RevokeSecurityGroupIngressOutput{} + output := &ResetAddressAttributeOutput{} out.Result = output var buff [1024]byte @@ -61419,7 +61407,7 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupIngress) HandleDeserialize( } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRevokeSecurityGroupIngressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentResetAddressAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61433,7 +61421,7 @@ func (m *awsEc2query_deserializeOpRevokeSecurityGroupIngress) HandleDeserialize( return out, metadata, err } -func awsEc2query_deserializeOpErrorRevokeSecurityGroupIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetAddressAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61466,14 +61454,14 @@ func awsEc2query_deserializeOpErrorRevokeSecurityGroupIngress(response *smithyht } } -type awsEc2query_deserializeOpRunInstances struct { +type awsEc2query_deserializeOpResetEbsDefaultKmsKeyId struct { } -func (*awsEc2query_deserializeOpRunInstances) ID() string { +func (*awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRunInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetEbsDefaultKmsKeyId) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61491,9 +61479,9 @@ func (m *awsEc2query_deserializeOpRunInstances) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRunInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetEbsDefaultKmsKeyId(response, &metadata) } - output := &RunInstancesOutput{} + output := &ResetEbsDefaultKmsKeyIdOutput{} out.Result = output var buff [1024]byte @@ -61514,7 +61502,7 @@ func (m *awsEc2query_deserializeOpRunInstances) HandleDeserialize(ctx context.Co } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRunInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentResetEbsDefaultKmsKeyIdOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61528,7 +61516,7 @@ func (m *awsEc2query_deserializeOpRunInstances) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsEc2query_deserializeOpErrorRunInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetEbsDefaultKmsKeyId(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61561,14 +61549,14 @@ func awsEc2query_deserializeOpErrorRunInstances(response *smithyhttp.Response, m } } -type awsEc2query_deserializeOpRunScheduledInstances struct { +type awsEc2query_deserializeOpResetFpgaImageAttribute struct { } -func (*awsEc2query_deserializeOpRunScheduledInstances) ID() string { +func (*awsEc2query_deserializeOpResetFpgaImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpRunScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetFpgaImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61586,9 +61574,9 @@ func (m *awsEc2query_deserializeOpRunScheduledInstances) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorRunScheduledInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetFpgaImageAttribute(response, &metadata) } - output := &RunScheduledInstancesOutput{} + output := &ResetFpgaImageAttributeOutput{} out.Result = output var buff [1024]byte @@ -61609,7 +61597,7 @@ func (m *awsEc2query_deserializeOpRunScheduledInstances) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentRunScheduledInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentResetFpgaImageAttributeOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -61623,7 +61611,7 @@ func (m *awsEc2query_deserializeOpRunScheduledInstances) HandleDeserialize(ctx c return out, 
metadata, err } -func awsEc2query_deserializeOpErrorRunScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetFpgaImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61656,14 +61644,14 @@ func awsEc2query_deserializeOpErrorRunScheduledInstances(response *smithyhttp.Re } } -type awsEc2query_deserializeOpSearchLocalGatewayRoutes struct { +type awsEc2query_deserializeOpResetImageAttribute struct { } -func (*awsEc2query_deserializeOpSearchLocalGatewayRoutes) ID() string { +func (*awsEc2query_deserializeOpResetImageAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpSearchLocalGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetImageAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61681,44 +61669,21 @@ func (m *awsEc2query_deserializeOpSearchLocalGatewayRoutes) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorSearchLocalGatewayRoutes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetImageAttribute(response, &metadata) } - output := &SearchLocalGatewayRoutesOutput{} + output := &ResetImageAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentSearchLocalGatewayRoutesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorSearchLocalGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetImageAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61751,14 +61716,14 @@ func awsEc2query_deserializeOpErrorSearchLocalGatewayRoutes(response *smithyhttp } } -type awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups struct { +type awsEc2query_deserializeOpResetInstanceAttribute 
struct { } -func (*awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups) ID() string { +func (*awsEc2query_deserializeOpResetInstanceAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetInstanceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61776,44 +61741,21 @@ func (m *awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorSearchTransitGatewayMulticastGroups(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetInstanceAttribute(response, &metadata) } - output := &SearchTransitGatewayMulticastGroupsOutput{} + output := &ResetInstanceAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentSearchTransitGatewayMulticastGroupsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorSearchTransitGatewayMulticastGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetInstanceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61846,14 +61788,14 @@ func awsEc2query_deserializeOpErrorSearchTransitGatewayMulticastGroups(response } } -type awsEc2query_deserializeOpSearchTransitGatewayRoutes struct { +type awsEc2query_deserializeOpResetNetworkInterfaceAttribute struct { } -func (*awsEc2query_deserializeOpSearchTransitGatewayRoutes) ID() string { +func (*awsEc2query_deserializeOpResetNetworkInterfaceAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpSearchTransitGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetNetworkInterfaceAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err 
error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61871,44 +61813,21 @@ func (m *awsEc2query_deserializeOpSearchTransitGatewayRoutes) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorSearchTransitGatewayRoutes(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetNetworkInterfaceAttribute(response, &metadata) } - output := &SearchTransitGatewayRoutesOutput{} + output := &ResetNetworkInterfaceAttributeOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentSearchTransitGatewayRoutesOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorSearchTransitGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetNetworkInterfaceAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -61941,14 +61860,14 @@ func awsEc2query_deserializeOpErrorSearchTransitGatewayRoutes(response *smithyht } } -type awsEc2query_deserializeOpSendDiagnosticInterrupt struct { +type awsEc2query_deserializeOpResetSnapshotAttribute struct { } -func (*awsEc2query_deserializeOpSendDiagnosticInterrupt) ID() string { +func (*awsEc2query_deserializeOpResetSnapshotAttribute) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpSendDiagnosticInterrupt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpResetSnapshotAttribute) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -61966,9 +61885,9 @@ func (m *awsEc2query_deserializeOpSendDiagnosticInterrupt) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorSendDiagnosticInterrupt(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorResetSnapshotAttribute(response, &metadata) } - output := &SendDiagnosticInterruptOutput{} + output := &ResetSnapshotAttributeOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -61980,7 +61899,7 @@ func (m 
*awsEc2query_deserializeOpSendDiagnosticInterrupt) HandleDeserialize(ctx return out, metadata, err } -func awsEc2query_deserializeOpErrorSendDiagnosticInterrupt(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorResetSnapshotAttribute(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62013,14 +61932,14 @@ func awsEc2query_deserializeOpErrorSendDiagnosticInterrupt(response *smithyhttp. } } -type awsEc2query_deserializeOpStartDeclarativePoliciesReport struct { +type awsEc2query_deserializeOpRestoreAddressToClassic struct { } -func (*awsEc2query_deserializeOpStartDeclarativePoliciesReport) ID() string { +func (*awsEc2query_deserializeOpRestoreAddressToClassic) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStartDeclarativePoliciesReport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRestoreAddressToClassic) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62038,9 +61957,9 @@ func (m *awsEc2query_deserializeOpStartDeclarativePoliciesReport) HandleDeserial } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStartDeclarativePoliciesReport(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRestoreAddressToClassic(response, &metadata) } - output := &StartDeclarativePoliciesReportOutput{} + output := &RestoreAddressToClassicOutput{} out.Result = output var buff [1024]byte @@ -62061,7 +61980,7 @@ func (m *awsEc2query_deserializeOpStartDeclarativePoliciesReport) HandleDeserial } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStartDeclarativePoliciesReportOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRestoreAddressToClassicOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62075,7 +61994,7 @@ func (m *awsEc2query_deserializeOpStartDeclarativePoliciesReport) HandleDeserial return out, metadata, err } -func awsEc2query_deserializeOpErrorStartDeclarativePoliciesReport(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRestoreAddressToClassic(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62108,14 +62027,14 @@ func awsEc2query_deserializeOpErrorStartDeclarativePoliciesReport(response *smit } } -type awsEc2query_deserializeOpStartInstances struct { +type awsEc2query_deserializeOpRestoreImageFromRecycleBin struct { } -func (*awsEc2query_deserializeOpStartInstances) ID() string { +func (*awsEc2query_deserializeOpRestoreImageFromRecycleBin) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStartInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( 
+func (m *awsEc2query_deserializeOpRestoreImageFromRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62133,9 +62052,9 @@ func (m *awsEc2query_deserializeOpStartInstances) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStartInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRestoreImageFromRecycleBin(response, &metadata) } - output := &StartInstancesOutput{} + output := &RestoreImageFromRecycleBinOutput{} out.Result = output var buff [1024]byte @@ -62156,7 +62075,7 @@ func (m *awsEc2query_deserializeOpStartInstances) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStartInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRestoreImageFromRecycleBinOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62170,7 +62089,7 @@ func (m *awsEc2query_deserializeOpStartInstances) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorStartInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRestoreImageFromRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62203,14 +62122,14 @@ func awsEc2query_deserializeOpErrorStartInstances(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis struct { +type awsEc2query_deserializeOpRestoreManagedPrefixListVersion struct { } -func (*awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) ID() string { +func (*awsEc2query_deserializeOpRestoreManagedPrefixListVersion) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRestoreManagedPrefixListVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62228,9 +62147,9 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) Handl } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStartNetworkInsightsAccessScopeAnalysis(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRestoreManagedPrefixListVersion(response, &metadata) } - output := &StartNetworkInsightsAccessScopeAnalysisOutput{} + output := &RestoreManagedPrefixListVersionOutput{} out.Result = output var buff [1024]byte @@ -62251,7 +62170,7 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) Handl } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStartNetworkInsightsAccessScopeAnalysisOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentRestoreManagedPrefixListVersionOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62265,7 +62184,7 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) Handl return out, metadata, err } -func awsEc2query_deserializeOpErrorStartNetworkInsightsAccessScopeAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRestoreManagedPrefixListVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62298,14 +62217,14 @@ func awsEc2query_deserializeOpErrorStartNetworkInsightsAccessScopeAnalysis(respo } } -type awsEc2query_deserializeOpStartNetworkInsightsAnalysis struct { +type awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin struct { } -func (*awsEc2query_deserializeOpStartNetworkInsightsAnalysis) ID() string { +func (*awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStartNetworkInsightsAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRestoreSnapshotFromRecycleBin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62323,9 +62242,9 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAnalysis) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStartNetworkInsightsAnalysis(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRestoreSnapshotFromRecycleBin(response, &metadata) } - output := &StartNetworkInsightsAnalysisOutput{} + output := &RestoreSnapshotFromRecycleBinOutput{} out.Result = output var buff [1024]byte @@ -62346,7 +62265,7 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAnalysis) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStartNetworkInsightsAnalysisOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRestoreSnapshotFromRecycleBinOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62360,7 +62279,7 @@ func (m *awsEc2query_deserializeOpStartNetworkInsightsAnalysis) HandleDeserializ return out, metadata, err } -func awsEc2query_deserializeOpErrorStartNetworkInsightsAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRestoreSnapshotFromRecycleBin(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62393,14 +62312,14 @@ func awsEc2query_deserializeOpErrorStartNetworkInsightsAnalysis(response *smithy } } -type awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification struct { +type awsEc2query_deserializeOpRestoreSnapshotTier struct { } -func 
(*awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) ID() string { +func (*awsEc2query_deserializeOpRestoreSnapshotTier) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRestoreSnapshotTier) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62418,9 +62337,9 @@ func (m *awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStartVpcEndpointServicePrivateDnsVerification(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRestoreSnapshotTier(response, &metadata) } - output := &StartVpcEndpointServicePrivateDnsVerificationOutput{} + output := &RestoreSnapshotTierOutput{} out.Result = output var buff [1024]byte @@ -62441,7 +62360,7 @@ func (m *awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStartVpcEndpointServicePrivateDnsVerificationOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRestoreSnapshotTierOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62455,7 +62374,7 @@ func (m *awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) return out, metadata, err } -func awsEc2query_deserializeOpErrorStartVpcEndpointServicePrivateDnsVerification(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRestoreSnapshotTier(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62488,14 +62407,14 @@ func awsEc2query_deserializeOpErrorStartVpcEndpointServicePrivateDnsVerification } } -type awsEc2query_deserializeOpStopInstances struct { +type awsEc2query_deserializeOpRevokeClientVpnIngress struct { } -func (*awsEc2query_deserializeOpStopInstances) ID() string { +func (*awsEc2query_deserializeOpRevokeClientVpnIngress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpStopInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRevokeClientVpnIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62513,9 +62432,9 @@ func (m *awsEc2query_deserializeOpStopInstances) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorStopInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRevokeClientVpnIngress(response, &metadata) } - output := &StopInstancesOutput{} + output := &RevokeClientVpnIngressOutput{} out.Result = output 
var buff [1024]byte @@ -62536,7 +62455,7 @@ func (m *awsEc2query_deserializeOpStopInstances) HandleDeserialize(ctx context.C } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentStopInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRevokeClientVpnIngressOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62550,7 +62469,7 @@ func (m *awsEc2query_deserializeOpStopInstances) HandleDeserialize(ctx context.C return out, metadata, err } -func awsEc2query_deserializeOpErrorStopInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRevokeClientVpnIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62583,14 +62502,14 @@ func awsEc2query_deserializeOpErrorStopInstances(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpTerminateClientVpnConnections struct { +type awsEc2query_deserializeOpRevokeSecurityGroupEgress struct { } -func (*awsEc2query_deserializeOpTerminateClientVpnConnections) ID() string { +func (*awsEc2query_deserializeOpRevokeSecurityGroupEgress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpTerminateClientVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRevokeSecurityGroupEgress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62608,9 +62527,9 @@ func (m *awsEc2query_deserializeOpTerminateClientVpnConnections) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorTerminateClientVpnConnections(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRevokeSecurityGroupEgress(response, &metadata) } - output := &TerminateClientVpnConnectionsOutput{} + output := &RevokeSecurityGroupEgressOutput{} out.Result = output var buff [1024]byte @@ -62631,7 +62550,7 @@ func (m *awsEc2query_deserializeOpTerminateClientVpnConnections) HandleDeseriali } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentTerminateClientVpnConnectionsOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRevokeSecurityGroupEgressOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62645,7 +62564,7 @@ func (m *awsEc2query_deserializeOpTerminateClientVpnConnections) HandleDeseriali return out, metadata, err } -func awsEc2query_deserializeOpErrorTerminateClientVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRevokeSecurityGroupEgress(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62678,14 +62597,14 @@ func awsEc2query_deserializeOpErrorTerminateClientVpnConnections(response *smith } } -type 
awsEc2query_deserializeOpTerminateInstances struct { +type awsEc2query_deserializeOpRevokeSecurityGroupIngress struct { } -func (*awsEc2query_deserializeOpTerminateInstances) ID() string { +func (*awsEc2query_deserializeOpRevokeSecurityGroupIngress) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpTerminateInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRevokeSecurityGroupIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62703,9 +62622,9 @@ func (m *awsEc2query_deserializeOpTerminateInstances) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorTerminateInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRevokeSecurityGroupIngress(response, &metadata) } - output := &TerminateInstancesOutput{} + output := &RevokeSecurityGroupIngressOutput{} out.Result = output var buff [1024]byte @@ -62726,7 +62645,7 @@ func (m *awsEc2query_deserializeOpTerminateInstances) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentTerminateInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRevokeSecurityGroupIngressOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62740,7 +62659,7 @@ func (m *awsEc2query_deserializeOpTerminateInstances) HandleDeserialize(ctx cont return out, metadata, err } -func awsEc2query_deserializeOpErrorTerminateInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRevokeSecurityGroupIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62773,14 +62692,14 @@ func awsEc2query_deserializeOpErrorTerminateInstances(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpUnassignIpv6Addresses struct { +type awsEc2query_deserializeOpRunInstances struct { } -func (*awsEc2query_deserializeOpUnassignIpv6Addresses) ID() string { +func (*awsEc2query_deserializeOpRunInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUnassignIpv6Addresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRunInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62798,9 +62717,9 @@ func (m *awsEc2query_deserializeOpUnassignIpv6Addresses) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUnassignIpv6Addresses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRunInstances(response, &metadata) } - output := &UnassignIpv6AddressesOutput{} + output := &RunInstancesOutput{} out.Result = output 
var buff [1024]byte @@ -62821,7 +62740,7 @@ func (m *awsEc2query_deserializeOpUnassignIpv6Addresses) HandleDeserialize(ctx c } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUnassignIpv6AddressesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentRunInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -62835,7 +62754,7 @@ func (m *awsEc2query_deserializeOpUnassignIpv6Addresses) HandleDeserialize(ctx c return out, metadata, err } -func awsEc2query_deserializeOpErrorUnassignIpv6Addresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorRunInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62868,14 +62787,14 @@ func awsEc2query_deserializeOpErrorUnassignIpv6Addresses(response *smithyhttp.Re } } -type awsEc2query_deserializeOpUnassignPrivateIpAddresses struct { +type awsEc2query_deserializeOpRunScheduledInstances struct { } -func (*awsEc2query_deserializeOpUnassignPrivateIpAddresses) ID() string { +func (*awsEc2query_deserializeOpRunScheduledInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUnassignPrivateIpAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpRunScheduledInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62893,21 +62812,44 @@ func (m *awsEc2query_deserializeOpUnassignPrivateIpAddresses) HandleDeserialize( } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUnassignPrivateIpAddresses(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorRunScheduledInstances(response, &metadata) } - output := &UnassignPrivateIpAddressesOutput{} + output := &RunScheduledInstancesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentRunScheduledInstancesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsEc2query_deserializeOpErrorUnassignPrivateIpAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsEc2query_deserializeOpErrorRunScheduledInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -62940,14 +62882,14 @@ func awsEc2query_deserializeOpErrorUnassignPrivateIpAddresses(response *smithyht } } -type awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress struct { +type awsEc2query_deserializeOpSearchLocalGatewayRoutes struct { } -func (*awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) ID() string { +func (*awsEc2query_deserializeOpSearchLocalGatewayRoutes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpSearchLocalGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -62965,9 +62907,9 @@ func (m *awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) HandleDeseri } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUnassignPrivateNatGatewayAddress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorSearchLocalGatewayRoutes(response, &metadata) } - output := &UnassignPrivateNatGatewayAddressOutput{} + output := &SearchLocalGatewayRoutesOutput{} out.Result = output var buff [1024]byte @@ -62988,7 +62930,7 @@ func (m *awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) HandleDeseri } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUnassignPrivateNatGatewayAddressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentSearchLocalGatewayRoutesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -63002,7 +62944,7 @@ func (m *awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) HandleDeseri return out, metadata, err } -func awsEc2query_deserializeOpErrorUnassignPrivateNatGatewayAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorSearchLocalGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -63035,14 +62977,14 @@ func awsEc2query_deserializeOpErrorUnassignPrivateNatGatewayAddress(response *sm } } -type awsEc2query_deserializeOpUnlockSnapshot struct { +type awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups struct { } -func (*awsEc2query_deserializeOpUnlockSnapshot) ID() string { +func (*awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUnlockSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpSearchTransitGatewayMulticastGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -63060,9 +63002,9 @@ func (m *awsEc2query_deserializeOpUnlockSnapshot) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUnlockSnapshot(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorSearchTransitGatewayMulticastGroups(response, &metadata) } - output := &UnlockSnapshotOutput{} + output := &SearchTransitGatewayMulticastGroupsOutput{} out.Result = output var buff [1024]byte @@ -63083,7 +63025,7 @@ func (m *awsEc2query_deserializeOpUnlockSnapshot) HandleDeserialize(ctx context. } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUnlockSnapshotOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentSearchTransitGatewayMulticastGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -63097,7 +63039,7 @@ func (m *awsEc2query_deserializeOpUnlockSnapshot) HandleDeserialize(ctx context. return out, metadata, err } -func awsEc2query_deserializeOpErrorUnlockSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorSearchTransitGatewayMulticastGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -63130,14 +63072,14 @@ func awsEc2query_deserializeOpErrorUnlockSnapshot(response *smithyhttp.Response, } } -type awsEc2query_deserializeOpUnmonitorInstances struct { +type awsEc2query_deserializeOpSearchTransitGatewayRoutes struct { } -func (*awsEc2query_deserializeOpUnmonitorInstances) ID() string { +func (*awsEc2query_deserializeOpSearchTransitGatewayRoutes) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUnmonitorInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpSearchTransitGatewayRoutes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -63155,9 +63097,9 @@ func (m *awsEc2query_deserializeOpUnmonitorInstances) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUnmonitorInstances(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorSearchTransitGatewayRoutes(response, &metadata) } - output := &UnmonitorInstancesOutput{} + output := &SearchTransitGatewayRoutesOutput{} out.Result = output var buff [1024]byte @@ -63178,7 +63120,7 @@ func (m *awsEc2query_deserializeOpUnmonitorInstances) HandleDeserialize(ctx cont } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUnmonitorInstancesOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentSearchTransitGatewayRoutesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -63192,7 +63134,7 @@ func (m *awsEc2query_deserializeOpUnmonitorInstances) HandleDeserialize(ctx cont return out, metadata, err } 
-func awsEc2query_deserializeOpErrorUnmonitorInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorSearchTransitGatewayRoutes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -63225,14 +63167,14 @@ func awsEc2query_deserializeOpErrorUnmonitorInstances(response *smithyhttp.Respo } } -type awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress struct { +type awsEc2query_deserializeOpSendDiagnosticInterrupt struct { } -func (*awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) ID() string { +func (*awsEc2query_deserializeOpSendDiagnosticInterrupt) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpSendDiagnosticInterrupt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -63250,9 +63192,81 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) Han } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsEgress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorSendDiagnosticInterrupt(response, &metadata) } - output := &UpdateSecurityGroupRuleDescriptionsEgressOutput{} + output := &SendDiagnosticInterruptOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorSendDiagnosticInterrupt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpStartDeclarativePoliciesReport struct { +} + +func (*awsEc2query_deserializeOpStartDeclarativePoliciesReport) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpStartDeclarativePoliciesReport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err 
error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorStartDeclarativePoliciesReport(response, &metadata) + } + output := &StartDeclarativePoliciesReportOutput{} out.Result = output var buff [1024]byte @@ -63273,7 +63287,7 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) Han } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUpdateSecurityGroupRuleDescriptionsEgressOutput(&output, decoder) + err = awsEc2query_deserializeOpDocumentStartDeclarativePoliciesReportOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -63287,7 +63301,7 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) Han return out, metadata, err } -func awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsEgress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorStartDeclarativePoliciesReport(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -63320,14 +63334,14 @@ func awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsEgress(res } } -type awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress struct { +type awsEc2query_deserializeOpStartInstances struct { } -func (*awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) ID() string { +func (*awsEc2query_deserializeOpStartInstances) ID() string { return "OperationDeserializer" } -func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsEc2query_deserializeOpStartInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -63345,9 +63359,9 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) Ha } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsIngress(response, &metadata) + return out, metadata, awsEc2query_deserializeOpErrorStartInstances(response, &metadata) } - output := &UpdateSecurityGroupRuleDescriptionsIngressOutput{} + output := &StartInstancesOutput{} out.Result = output var buff [1024]byte @@ -63368,7 +63382,7 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) Ha } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsEc2query_deserializeOpDocumentUpdateSecurityGroupRuleDescriptionsIngressOutput(&output, decoder) + err = 
awsEc2query_deserializeOpDocumentStartInstancesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -63382,7 +63396,1219 @@ func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) Ha return out, metadata, err } -func awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsEc2query_deserializeOpErrorStartInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis struct { +} + +func (*awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpStartNetworkInsightsAccessScopeAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorStartNetworkInsightsAccessScopeAnalysis(response, &metadata) + } + output := &StartNetworkInsightsAccessScopeAnalysisOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentStartNetworkInsightsAccessScopeAnalysisOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func 
awsEc2query_deserializeOpErrorStartNetworkInsightsAccessScopeAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpStartNetworkInsightsAnalysis struct { +} + +func (*awsEc2query_deserializeOpStartNetworkInsightsAnalysis) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpStartNetworkInsightsAnalysis) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorStartNetworkInsightsAnalysis(response, &metadata) + } + output := &StartNetworkInsightsAnalysisOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentStartNetworkInsightsAnalysisOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorStartNetworkInsightsAnalysis(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := 
ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification struct { +} + +func (*awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpStartVpcEndpointServicePrivateDnsVerification) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorStartVpcEndpointServicePrivateDnsVerification(response, &metadata) + } + output := &StartVpcEndpointServicePrivateDnsVerificationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentStartVpcEndpointServicePrivateDnsVerificationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorStartVpcEndpointServicePrivateDnsVerification(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + 
default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpStopInstances struct { +} + +func (*awsEc2query_deserializeOpStopInstances) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpStopInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorStopInstances(response, &metadata) + } + output := &StopInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentStopInstancesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorStopInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpTerminateClientVpnConnections struct { +} + +func (*awsEc2query_deserializeOpTerminateClientVpnConnections) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpTerminateClientVpnConnections) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorTerminateClientVpnConnections(response, &metadata) + } + output := &TerminateClientVpnConnectionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentTerminateClientVpnConnectionsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorTerminateClientVpnConnections(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpTerminateInstances struct { +} + +func (*awsEc2query_deserializeOpTerminateInstances) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpTerminateInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || 
response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorTerminateInstances(response, &metadata) + } + output := &TerminateInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentTerminateInstancesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorTerminateInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUnassignIpv6Addresses struct { +} + +func (*awsEc2query_deserializeOpUnassignIpv6Addresses) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUnassignIpv6Addresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUnassignIpv6Addresses(response, &metadata) + } + output := &UnassignIpv6AddressesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUnassignIpv6AddressesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUnassignIpv6Addresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUnassignPrivateIpAddresses struct { +} + +func (*awsEc2query_deserializeOpUnassignPrivateIpAddresses) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUnassignPrivateIpAddresses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUnassignPrivateIpAddresses(response, &metadata) + } + output := &UnassignPrivateIpAddressesOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUnassignPrivateIpAddresses(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress struct { +} + +func (*awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUnassignPrivateNatGatewayAddress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUnassignPrivateNatGatewayAddress(response, &metadata) + } + output := &UnassignPrivateNatGatewayAddressOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUnassignPrivateNatGatewayAddressOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUnassignPrivateNatGatewayAddress(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUnlockSnapshot struct { +} + +func (*awsEc2query_deserializeOpUnlockSnapshot) ID() string { 
+ return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUnlockSnapshot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUnlockSnapshot(response, &metadata) + } + output := &UnlockSnapshotOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUnlockSnapshotOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUnlockSnapshot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUnmonitorInstances struct { +} + +func (*awsEc2query_deserializeOpUnmonitorInstances) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUnmonitorInstances) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, 
ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUnmonitorInstances(response, &metadata) + } + output := &UnmonitorInstancesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUnmonitorInstancesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUnmonitorInstances(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress struct { +} + +func (*awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsEgress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsEgress(response, &metadata) + } + output := &UpdateSecurityGroupRuleDescriptionsEgressOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUpdateSecurityGroupRuleDescriptionsEgressOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsEgress(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := ec2query.GetErrorResponseComponents(errorBody) + if err != nil { + return err + } + awsmiddleware.SetRequestIDMetadata(metadata, errorComponents.RequestID) + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress struct { +} + +func (*awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) ID() string { + return "OperationDeserializer" +} + +func (m *awsEc2query_deserializeOpUpdateSecurityGroupRuleDescriptionsIngress) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsIngress(response, &metadata) + } + output := &UpdateSecurityGroupRuleDescriptionsIngressOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentUpdateSecurityGroupRuleDescriptionsIngressOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsEc2query_deserializeOpErrorUpdateSecurityGroupRuleDescriptionsIngress(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -64557,6 +65783,167 @@ func awsEc2query_deserializeDocumentActiveInstanceSetUnwrapped(v *[]types.Active *v = sv return nil } +func awsEc2query_deserializeDocumentActiveVpnTunnelStatus(v **types.ActiveVpnTunnelStatus, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ActiveVpnTunnelStatus + if *v == nil { + sv = &types.ActiveVpnTunnelStatus{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ikeVersion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IkeVersion = ptr.String(xtv) + } + + case strings.EqualFold("phase1DHGroup", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Phase1DHGroup = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("phase1EncryptionAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Phase1EncryptionAlgorithm = ptr.String(xtv) + } + + case strings.EqualFold("phase1IntegrityAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Phase1IntegrityAlgorithm = ptr.String(xtv) + } + + case strings.EqualFold("phase2DHGroup", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Phase2DHGroup = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("phase2EncryptionAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Phase2EncryptionAlgorithm = ptr.String(xtv) + } + + case strings.EqualFold("phase2IntegrityAlgorithm", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Phase2IntegrityAlgorithm = ptr.String(xtv) + } + + case strings.EqualFold("provisioningStatus", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ProvisioningStatus = 
types.VpnTunnelProvisioningStatus(xtv) + } + + case strings.EqualFold("provisioningStatusReason", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ProvisioningStatusReason = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentAddedPrincipal(v **types.AddedPrincipal, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -65083,6 +66470,19 @@ func awsEc2query_deserializeDocumentAddress(v **types.Address, decoder smithyxml sv.ServiceManaged = types.ServiceManaged(xtv) } + case strings.EqualFold("subnetId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubnetId = ptr.String(xtv) + } + case strings.EqualFold("tagSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { @@ -67374,6 +68774,86 @@ func awsEc2query_deserializeDocumentAssociatedRolesListUnwrapped(v *[]types.Asso *v = sv return nil } +func awsEc2query_deserializeDocumentAssociatedSubnetList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentAssociatedSubnetListUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentAssociatedTargetNetwork(v **types.AssociatedTargetNetwork, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -70490,6 +71970,170 @@ func awsEc2query_deserializeDocumentCapacityAllocationsUnwrapped(v *[]types.Capa *v = sv return nil } +func awsEc2query_deserializeDocumentCapacityBlock(v **types.CapacityBlock, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CapacityBlock + if *v == nil { + sv = &types.CapacityBlock{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("availabilityZone", 
t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZone = ptr.String(xtv) + } + + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + + case strings.EqualFold("capacityBlockId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CapacityBlockId = ptr.String(xtv) + } + + case strings.EqualFold("capacityReservationIdSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityReservationIdSet(&sv.CapacityReservationIds, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("createDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.CreateDate = ptr.Time(t) + } + + case strings.EqualFold("endDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.EndDate = ptr.Time(t) + } + + case strings.EqualFold("startDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.StartDate = ptr.Time(t) + } + + case strings.EqualFold("state", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.State = types.CapacityBlockResourceState(xtv) + } + + case strings.EqualFold("tagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ultraserverType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UltraserverType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentCapacityBlockExtension(v **types.CapacityBlockExtension, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -70927,13 +72571,378 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(v **types.Cap return nil } -func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSet(v *[]types.CapacityBlockExtensionOffering, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSet(v *[]types.CapacityBlockExtensionOffering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CapacityBlockExtensionOffering + if *v == nil { + sv = make([]types.CapacityBlockExtensionOffering, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != 
nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.CapacityBlockExtensionOffering + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSetUnwrapped(v *[]types.CapacityBlockExtensionOffering, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityBlockExtensionOffering + if *v == nil { + sv = make([]types.CapacityBlockExtensionOffering, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CapacityBlockExtensionOffering + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentCapacityBlockExtensionSet(v *[]types.CapacityBlockExtension, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CapacityBlockExtension + if *v == nil { + sv = make([]types.CapacityBlockExtension, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.CapacityBlockExtension + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentCapacityBlockExtension(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityBlockExtensionSetUnwrapped(v *[]types.CapacityBlockExtension, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityBlockExtension + if *v == nil { + sv = make([]types.CapacityBlockExtension, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CapacityBlockExtension + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentCapacityBlockExtension(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBlockOffering, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CapacityBlockOffering + if *v == nil { + sv = &types.CapacityBlockOffering{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("availabilityZone", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.AvailabilityZone = ptr.String(xtv) + } + + case strings.EqualFold("capacityBlockDurationHours", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.CapacityBlockDurationHours = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("capacityBlockDurationMinutes", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.CapacityBlockDurationMinutes = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("capacityBlockOfferingId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CapacityBlockOfferingId = ptr.String(xtv) + } + + case strings.EqualFold("currencyCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CurrencyCode = ptr.String(xtv) + } + + case strings.EqualFold("endDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.EndDate = ptr.Time(t) + } + + case strings.EqualFold("instanceCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.InstanceCount = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("instanceType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.InstanceType = ptr.String(xtv) + } + + case strings.EqualFold("startDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.StartDate = ptr.Time(t) + } + + case strings.EqualFold("tenancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Tenancy = types.CapacityReservationTenancy(xtv) + } + + case strings.EqualFold("ultraserverCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.UltraserverCount = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("ultraserverType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UltraserverType = ptr.String(xtv) + } + + case strings.EqualFold("upfrontFee", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UpfrontFee = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityBlockOfferingSet(v *[]types.CapacityBlockOffering, decoder 
smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv []types.CapacityBlockExtensionOffering + var sv []types.CapacityBlockOffering if *v == nil { - sv = make([]types.CapacityBlockExtensionOffering, 0) + sv = make([]types.CapacityBlockOffering, 0) } else { sv = *v } @@ -70949,10 +72958,10 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSet(v *[]types } switch { case strings.EqualFold("item", t.Name.Local): - var col types.CapacityBlockExtensionOffering + var col types.CapacityBlockOffering nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &col - if err := awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlockOffering(&destAddr, nodeDecoder); err != nil { return err } col = *destAddr @@ -70971,22 +72980,22 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSet(v *[]types return nil } -func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSetUnwrapped(v *[]types.CapacityBlockExtensionOffering, decoder smithyxml.NodeDecoder) error { - var sv []types.CapacityBlockExtensionOffering +func awsEc2query_deserializeDocumentCapacityBlockOfferingSetUnwrapped(v *[]types.CapacityBlockOffering, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityBlockOffering if *v == nil { - sv = make([]types.CapacityBlockExtensionOffering, 0) + sv = make([]types.CapacityBlockOffering, 0) } else { sv = *v } switch { default: - var mv types.CapacityBlockExtensionOffering + var mv types.CapacityBlockOffering t := decoder.StartEl _ = t nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &mv - if err := awsEc2query_deserializeDocumentCapacityBlockExtensionOffering(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlockOffering(&destAddr, nodeDecoder); err != nil { return err } mv = *destAddr @@ -70995,13 +73004,13 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionOfferingSetUnwrapped(v *v = sv return nil } -func awsEc2query_deserializeDocumentCapacityBlockExtensionSet(v *[]types.CapacityBlockExtension, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeDocumentCapacityBlockSet(v *[]types.CapacityBlock, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv []types.CapacityBlockExtension + var sv []types.CapacityBlock if *v == nil { - sv = make([]types.CapacityBlockExtension, 0) + sv = make([]types.CapacityBlock, 0) } else { sv = *v } @@ -71017,10 +73026,10 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionSet(v *[]types.Capacit } switch { case strings.EqualFold("item", t.Name.Local): - var col types.CapacityBlockExtension + var col types.CapacityBlock nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &col - if err := awsEc2query_deserializeDocumentCapacityBlockExtension(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlock(&destAddr, nodeDecoder); err != nil { return err } col = *destAddr @@ -71039,22 +73048,22 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionSet(v *[]types.Capacit return nil } -func awsEc2query_deserializeDocumentCapacityBlockExtensionSetUnwrapped(v *[]types.CapacityBlockExtension, decoder smithyxml.NodeDecoder) error { - var sv []types.CapacityBlockExtension +func awsEc2query_deserializeDocumentCapacityBlockSetUnwrapped(v 
*[]types.CapacityBlock, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityBlock if *v == nil { - sv = make([]types.CapacityBlockExtension, 0) + sv = make([]types.CapacityBlock, 0) } else { sv = *v } switch { default: - var mv types.CapacityBlockExtension + var mv types.CapacityBlock t := decoder.StartEl _ = t nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &mv - if err := awsEc2query_deserializeDocumentCapacityBlockExtension(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlock(&destAddr, nodeDecoder); err != nil { return err } mv = *destAddr @@ -71063,13 +73072,13 @@ func awsEc2query_deserializeDocumentCapacityBlockExtensionSetUnwrapped(v *[]type *v = sv return nil } -func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBlockOffering, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeDocumentCapacityBlockStatus(v **types.CapacityBlockStatus, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.CapacityBlockOffering + var sv *types.CapacityBlockStatus if *v == nil { - sv = &types.CapacityBlockOffering{} + sv = &types.CapacityBlockStatus{} } else { sv = *v } @@ -71085,7 +73094,7 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("availabilityZone", t.Name.Local): + case strings.EqualFold("capacityBlockId", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -71095,57 +73104,16 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc } { xtv := string(val) - sv.AvailabilityZone = ptr.String(xtv) + sv.CapacityBlockId = ptr.String(xtv) } - case strings.EqualFold("capacityBlockDurationHours", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.CapacityBlockDurationHours = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("capacityBlockDurationMinutes", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.CapacityBlockDurationMinutes = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("capacityBlockOfferingId", t.Name.Local): - val, err := decoder.Value() - if err != nil { + case strings.EqualFold("capacityReservationStatusSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityReservationStatusSet(&sv.CapacityReservationStatuses, nodeDecoder); err != nil { return err } - if val == nil { - break - } - { - xtv := string(val) - sv.CapacityBlockOfferingId = ptr.String(xtv) - } - case strings.EqualFold("currencyCode", t.Name.Local): + case strings.EqualFold("interconnectStatus", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -71155,10 +73123,10 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc } { xtv := string(val) - sv.CurrencyCode = ptr.String(xtv) + sv.InterconnectStatus = types.CapacityBlockInterconnectStatus(xtv) } - case strings.EqualFold("endDate", t.Name.Local): + case strings.EqualFold("totalAvailableCapacity", t.Name.Local): val, 
err := decoder.Value() if err != nil { return err @@ -71168,14 +73136,14 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc } { xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) + i64, err := strconv.ParseInt(xtv, 10, 64) if err != nil { return err } - sv.EndDate = ptr.Time(t) + sv.TotalAvailableCapacity = ptr.Int32(int32(i64)) } - case strings.EqualFold("instanceCount", t.Name.Local): + case strings.EqualFold("totalCapacity", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -71189,23 +73157,10 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc if err != nil { return err } - sv.InstanceCount = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("instanceType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.InstanceType = ptr.String(xtv) + sv.TotalCapacity = ptr.Int32(int32(i64)) } - case strings.EqualFold("startDate", t.Name.Local): + case strings.EqualFold("totalUnavailableCapacity", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -71215,37 +73170,11 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc } { xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) + i64, err := strconv.ParseInt(xtv, 10, 64) if err != nil { return err } - sv.StartDate = ptr.Time(t) - } - - case strings.EqualFold("tenancy", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Tenancy = types.CapacityReservationTenancy(xtv) - } - - case strings.EqualFold("upfrontFee", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UpfrontFee = ptr.String(xtv) + sv.TotalUnavailableCapacity = ptr.Int32(int32(i64)) } default: @@ -71262,13 +73191,13 @@ func awsEc2query_deserializeDocumentCapacityBlockOffering(v **types.CapacityBloc return nil } -func awsEc2query_deserializeDocumentCapacityBlockOfferingSet(v *[]types.CapacityBlockOffering, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeDocumentCapacityBlockStatusSet(v *[]types.CapacityBlockStatus, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv []types.CapacityBlockOffering + var sv []types.CapacityBlockStatus if *v == nil { - sv = make([]types.CapacityBlockOffering, 0) + sv = make([]types.CapacityBlockStatus, 0) } else { sv = *v } @@ -71284,10 +73213,10 @@ func awsEc2query_deserializeDocumentCapacityBlockOfferingSet(v *[]types.Capacity } switch { case strings.EqualFold("item", t.Name.Local): - var col types.CapacityBlockOffering + var col types.CapacityBlockStatus nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &col - if err := awsEc2query_deserializeDocumentCapacityBlockOffering(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlockStatus(&destAddr, nodeDecoder); err != nil { return err } col = *destAddr @@ -71306,22 +73235,22 @@ func awsEc2query_deserializeDocumentCapacityBlockOfferingSet(v *[]types.Capacity return nil } -func awsEc2query_deserializeDocumentCapacityBlockOfferingSetUnwrapped(v *[]types.CapacityBlockOffering, decoder smithyxml.NodeDecoder) error { - var sv []types.CapacityBlockOffering +func awsEc2query_deserializeDocumentCapacityBlockStatusSetUnwrapped(v *[]types.CapacityBlockStatus, 
decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityBlockStatus if *v == nil { - sv = make([]types.CapacityBlockOffering, 0) + sv = make([]types.CapacityBlockStatus, 0) } else { sv = *v } switch { default: - var mv types.CapacityBlockOffering + var mv types.CapacityBlockStatus t := decoder.StartEl _ = t nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) destAddr := &mv - if err := awsEc2query_deserializeDocumentCapacityBlockOffering(&destAddr, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentCapacityBlockStatus(&destAddr, nodeDecoder); err != nil { return err } mv = *destAddr @@ -71401,6 +73330,19 @@ func awsEc2query_deserializeDocumentCapacityReservation(v **types.CapacityReserv return err } + case strings.EqualFold("capacityBlockId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CapacityBlockId = ptr.String(xtv) + } + case strings.EqualFold("capacityReservationArn", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -72519,6 +74461,86 @@ func awsEc2query_deserializeDocumentCapacityReservationGroupSetUnwrapped(v *[]ty *v = sv return nil } +func awsEc2query_deserializeDocumentCapacityReservationIdSet(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityReservationIdSetUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentCapacityReservationInfo(v **types.CapacityReservationInfo, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -72779,6 +74801,174 @@ func awsEc2query_deserializeDocumentCapacityReservationSpecificationResponse(v * return nil } +func awsEc2query_deserializeDocumentCapacityReservationStatus(v **types.CapacityReservationStatus, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.CapacityReservationStatus + if *v == nil { + sv = &types.CapacityReservationStatus{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("capacityReservationId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil 
{ + break + } + { + xtv := string(val) + sv.CapacityReservationId = ptr.String(xtv) + } + + case strings.EqualFold("totalAvailableCapacity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalAvailableCapacity = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("totalCapacity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalCapacity = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("totalUnavailableCapacity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.TotalUnavailableCapacity = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityReservationStatusSet(v *[]types.CapacityReservationStatus, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.CapacityReservationStatus + if *v == nil { + sv = make([]types.CapacityReservationStatus, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.CapacityReservationStatus + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentCapacityReservationStatus(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentCapacityReservationStatusSetUnwrapped(v *[]types.CapacityReservationStatus, decoder smithyxml.NodeDecoder) error { + var sv []types.CapacityReservationStatus + if *v == nil { + sv = make([]types.CapacityReservationStatus, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.CapacityReservationStatus + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentCapacityReservationStatus(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentCapacityReservationTargetResponse(v **types.CapacityReservationTargetResponse, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -74085,6 +76275,19 @@ func awsEc2query_deserializeDocumentClientVpnConnection(v **types.ClientVpnConne sv.ClientIp = ptr.String(xtv) } + case strings.EqualFold("clientIpv6Address", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ClientIpv6Address = ptr.String(xtv) + } + case strings.EqualFold("clientVpnEndpointId", t.Name.Local): val, err := decoder.Value() if err 
!= nil { @@ -74542,6 +76745,19 @@ func awsEc2query_deserializeDocumentClientVpnEndpoint(v **types.ClientVpnEndpoin return err } + case strings.EqualFold("endpointIpAddressType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EndpointIpAddressType = types.EndpointIpAddressType(xtv) + } + case strings.EqualFold("securityGroupIdSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentClientVpnSecurityGroupIdSet(&sv.SecurityGroupIds, nodeDecoder); err != nil { @@ -74619,6 +76835,19 @@ func awsEc2query_deserializeDocumentClientVpnEndpoint(v **types.ClientVpnEndpoin return err } + case strings.EqualFold("trafficIpAddressType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TrafficIpAddressType = types.TrafficIpAddressType(xtv) + } + case strings.EqualFold("transportProtocol", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -78497,6 +80726,136 @@ func awsEc2query_deserializeDocumentDeleteQueuedReservedInstancesError(v **types return nil } +func awsEc2query_deserializeDocumentDeleteSnapshotResultSet(v *[]types.DeleteSnapshotReturnCode, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DeleteSnapshotReturnCode + if *v == nil { + sv = make([]types.DeleteSnapshotReturnCode, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.DeleteSnapshotReturnCode + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentDeleteSnapshotReturnCode(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentDeleteSnapshotResultSetUnwrapped(v *[]types.DeleteSnapshotReturnCode, decoder smithyxml.NodeDecoder) error { + var sv []types.DeleteSnapshotReturnCode + if *v == nil { + sv = make([]types.DeleteSnapshotReturnCode, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DeleteSnapshotReturnCode + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentDeleteSnapshotReturnCode(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentDeleteSnapshotReturnCode(v **types.DeleteSnapshotReturnCode, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DeleteSnapshotReturnCode + if *v == nil { + sv = &types.DeleteSnapshotReturnCode{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("returnCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.ReturnCode = types.SnapshotReturnCodes(xtv) + } + + case strings.EqualFold("snapshotId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SnapshotId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentDeprovisionedAddressSet(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -81137,6 +83496,32 @@ func awsEc2query_deserializeDocumentEbsBlockDevice(v **types.EbsBlockDevice, dec originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("availabilityZone", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZone = ptr.String(xtv) + } + + case strings.EqualFold("AvailabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("deleteOnTermination", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -82113,6 +84498,19 @@ func awsEc2query_deserializeDocumentEc2InstanceConnectEndpoint(v **types.Ec2Inst sv.InstanceConnectEndpointId = ptr.String(xtv) } + case strings.EqualFold("ipAddressType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IpAddressType = types.IpAddressType(xtv) + } + case strings.EqualFold("networkInterfaceIdSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentNetworkInterfaceIdSet(&sv.NetworkInterfaceIds, nodeDecoder); err != nil { @@ -82148,6 +84546,12 @@ func awsEc2query_deserializeDocumentEc2InstanceConnectEndpoint(v **types.Ec2Inst sv.PreserveClientIp = ptr.Bool(xtv) } + case strings.EqualFold("publicDnsNames", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentInstanceConnectEndpointPublicDnsNames(&sv.PublicDnsNames, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("securityGroupIdSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentSecurityGroupIdSet(&sv.SecurityGroupIds, nodeDecoder); err != nil { @@ -90737,6 +93141,22 @@ func awsEc2query_deserializeDocumentImage(v **types.Image, decoder smithyxml.Nod sv.EnaSupport = ptr.Bool(xtv) } + case strings.EqualFold("freeTierEligible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.FreeTierEligible = ptr.Bool(xtv) + } + case strings.EqualFold("hypervisor", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -91694,6 +94114,873 @@ func awsEc2query_deserializeDocumentImageRecycleBinInfoListUnwrapped(v *[]types. 
*v = sv return nil } +func awsEc2query_deserializeDocumentImageReference(v **types.ImageReference, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ImageReference + if *v == nil { + sv = &types.ImageReference{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("imageId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ImageId = ptr.String(xtv) + } + + case strings.EqualFold("resourceType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ResourceType = types.ImageReferenceResourceType(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageReferenceList(v *[]types.ImageReference, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ImageReference + if *v == nil { + sv = make([]types.ImageReference, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.ImageReference + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentImageReference(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageReferenceListUnwrapped(v *[]types.ImageReference, decoder smithyxml.NodeDecoder) error { + var sv []types.ImageReference + if *v == nil { + sv = make([]types.ImageReference, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ImageReference + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentImageReference(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentImageUsageReport(v **types.ImageUsageReport, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ImageUsageReport + if *v == nil { + sv = &types.ImageUsageReport{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("accountIdSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsEc2query_deserializeDocumentUserIdList(&sv.AccountIds, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("creationTime", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case strings.EqualFold("expirationTime", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.ExpirationTime = ptr.Time(t) + } + + case strings.EqualFold("imageId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ImageId = ptr.String(xtv) + } + + case strings.EqualFold("reportId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReportId = ptr.String(xtv) + } + + case strings.EqualFold("resourceTypeSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageUsageResourceTypeList(&sv.ResourceTypes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("state", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.State = ptr.String(xtv) + } + + case strings.EqualFold("stateReason", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StateReason = ptr.String(xtv) + } + + case strings.EqualFold("tagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageReportEntry(v **types.ImageUsageReportEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ImageUsageReportEntry + if *v == nil { + sv = &types.ImageUsageReportEntry{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("accountId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccountId = ptr.String(xtv) + } + + case strings.EqualFold("imageId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ImageId = ptr.String(xtv) + } + + case strings.EqualFold("reportCreationTime", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.ReportCreationTime = ptr.Time(t) + } + + case strings.EqualFold("reportId", t.Name.Local): + val, err := decoder.Value() + if err != nil { 
+ return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReportId = ptr.String(xtv) + } + + case strings.EqualFold("resourceType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ResourceType = ptr.String(xtv) + } + + case strings.EqualFold("usageCount", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.UsageCount = ptr.Int64(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageReportEntryList(v *[]types.ImageUsageReportEntry, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ImageUsageReportEntry + if *v == nil { + sv = make([]types.ImageUsageReportEntry, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.ImageUsageReportEntry + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentImageUsageReportEntry(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageReportEntryListUnwrapped(v *[]types.ImageUsageReportEntry, decoder smithyxml.NodeDecoder) error { + var sv []types.ImageUsageReportEntry + if *v == nil { + sv = make([]types.ImageUsageReportEntry, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ImageUsageReportEntry + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentImageUsageReportEntry(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentImageUsageReportList(v *[]types.ImageUsageReport, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ImageUsageReport + if *v == nil { + sv = make([]types.ImageUsageReport, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.ImageUsageReport + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentImageUsageReport(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageReportListUnwrapped(v *[]types.ImageUsageReport, decoder smithyxml.NodeDecoder) error { + var sv []types.ImageUsageReport + if *v == nil { + sv = make([]types.ImageUsageReport, 0) + 
} else { + sv = *v + } + + switch { + default: + var mv types.ImageUsageReport + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentImageUsageReport(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentImageUsageResourceType(v **types.ImageUsageResourceType, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ImageUsageResourceType + if *v == nil { + sv = &types.ImageUsageResourceType{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("resourceType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ResourceType = ptr.String(xtv) + } + + case strings.EqualFold("resourceTypeOptionSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageUsageResourceTypeOptionList(&sv.ResourceTypeOptions, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageResourceTypeList(v *[]types.ImageUsageResourceType, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ImageUsageResourceType + if *v == nil { + sv = make([]types.ImageUsageResourceType, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.ImageUsageResourceType + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentImageUsageResourceType(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageResourceTypeListUnwrapped(v *[]types.ImageUsageResourceType, decoder smithyxml.NodeDecoder) error { + var sv []types.ImageUsageResourceType + if *v == nil { + sv = make([]types.ImageUsageResourceType, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ImageUsageResourceType + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentImageUsageResourceType(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentImageUsageResourceTypeOption(v **types.ImageUsageResourceTypeOption, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ImageUsageResourceTypeOption + if *v == nil { + sv = &types.ImageUsageResourceTypeOption{} + } else { + sv = *v + } + + for 
{ + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("optionName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.OptionName = ptr.String(xtv) + } + + case strings.EqualFold("optionValueSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageUsageResourceTypeOptionValuesList(&sv.OptionValues, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageResourceTypeOptionList(v *[]types.ImageUsageResourceTypeOption, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.ImageUsageResourceTypeOption + if *v == nil { + sv = make([]types.ImageUsageResourceTypeOption, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.ImageUsageResourceTypeOption + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentImageUsageResourceTypeOption(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageResourceTypeOptionListUnwrapped(v *[]types.ImageUsageResourceTypeOption, decoder smithyxml.NodeDecoder) error { + var sv []types.ImageUsageResourceTypeOption + if *v == nil { + sv = make([]types.ImageUsageResourceTypeOption, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.ImageUsageResourceTypeOption + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentImageUsageResourceTypeOption(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsEc2query_deserializeDocumentImageUsageResourceTypeOptionValuesList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentImageUsageResourceTypeOptionValuesListUnwrapped(v *[]string, decoder 
smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentImportImageLicenseConfigurationResponse(v **types.ImportImageLicenseConfigurationResponse, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -92234,6 +95521,19 @@ func awsEc2query_deserializeDocumentImportInstanceVolumeDetailItem(v **types.Imp sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("bytesConverted", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -92561,6 +95861,19 @@ func awsEc2query_deserializeDocumentImportVolumeTaskDetails(v **types.ImportVolu sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("bytesConverted", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -92882,6 +96195,89 @@ func awsEc2query_deserializeDocumentInferenceDeviceMemoryInfo(v **types.Inferenc return nil } +func awsEc2query_deserializeDocumentInitializationStatusDetails(v **types.InitializationStatusDetails, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InitializationStatusDetails + if *v == nil { + sv = &types.InitializationStatusDetails{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("estimatedTimeToCompleteInSeconds", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.EstimatedTimeToCompleteInSeconds = ptr.Int64(i64) + } + + case strings.EqualFold("initializationType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.InitializationType = types.InitializationType(xtv) + } + + case strings.EqualFold("progress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Progress = ptr.Int64(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentInsideCidrBlocksStringList(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -93033,6 +96429,19 @@ func awsEc2query_deserializeDocumentInstance(v 
**types.Instance, decoder smithyx sv.BootMode = types.BootModeValues(xtv) } + case strings.EqualFold("capacityBlockId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CapacityBlockId = ptr.String(xtv) + } + case strings.EqualFold("capacityReservationId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -93948,6 +97357,116 @@ func awsEc2query_deserializeDocumentInstanceCapacity(v **types.InstanceCapacity, return nil } +func awsEc2query_deserializeDocumentInstanceConnectEndpointDnsNames(v **types.InstanceConnectEndpointDnsNames, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InstanceConnectEndpointDnsNames + if *v == nil { + sv = &types.InstanceConnectEndpointDnsNames{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("dnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DnsName = ptr.String(xtv) + } + + case strings.EqualFold("fipsDnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FipsDnsName = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentInstanceConnectEndpointPublicDnsNames(v **types.InstanceConnectEndpointPublicDnsNames, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InstanceConnectEndpointPublicDnsNames + if *v == nil { + sv = &types.InstanceConnectEndpointPublicDnsNames{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("dualstack", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentInstanceConnectEndpointDnsNames(&sv.Dualstack, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("ipv4", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentInstanceConnectEndpointDnsNames(&sv.Ipv4, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentInstanceConnectEndpointSet(v *[]types.Ec2InstanceConnectEndpoint, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -95858,6 +99377,19 @@ func awsEc2query_deserializeDocumentInstanceMaintenanceOptions(v **types.Instanc sv.AutoRecovery = types.InstanceAutoRecoveryState(xtv) } + case strings.EqualFold("rebootMigration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.RebootMigration = types.InstanceRebootMigrationState(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -97937,6 +101469,19 @@ func awsEc2query_deserializeDocumentInstanceStatus(v **types.InstanceStatus, dec sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("eventsSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentInstanceStatusEventList(&sv.Events, nodeDecoder); err != nil { @@ -98729,6 +102274,19 @@ func awsEc2query_deserializeDocumentInstanceTopology(v **types.InstanceTopology, sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("capacityBlockId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CapacityBlockId = ptr.String(xtv) + } + case strings.EqualFold("groupName", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -99088,6 +102646,19 @@ func awsEc2query_deserializeDocumentInstanceTypeInfo(v **types.InstanceTypeInfo, return err } + case strings.EqualFold("rebootMigrationSupport", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.RebootMigrationSupport = types.RebootMigrationSupport(xtv) + } + case strings.EqualFold("supportedBootModes", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentBootModeTypeList(&sv.SupportedBootModes, nodeDecoder); err != nil { @@ -108978,6 +112549,19 @@ func awsEc2query_deserializeDocumentLaunchTemplatePlacement(v **types.LaunchTemp sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("groupId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -112089,6 +115673,191 @@ func awsEc2query_deserializeDocumentMacHostListUnwrapped(v *[]types.MacHost, dec *v = sv return nil } +func awsEc2query_deserializeDocumentMacModificationTask(v **types.MacModificationTask, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MacModificationTask + if *v == nil { + sv = &types.MacModificationTask{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("instanceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.InstanceId = ptr.String(xtv) + } + + case strings.EqualFold("macModificationTaskId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.MacModificationTaskId = ptr.String(xtv) + } + + case strings.EqualFold("macSystemIntegrityProtectionConfig", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsEc2query_deserializeDocumentMacSystemIntegrityProtectionConfiguration(&sv.MacSystemIntegrityProtectionConfig, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("startTime", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.StartTime = ptr.Time(t) + } + + case strings.EqualFold("tagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("taskState", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TaskState = types.MacModificationTaskState(xtv) + } + + case strings.EqualFold("taskType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TaskType = types.MacModificationTaskType(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMacModificationTaskList(v *[]types.MacModificationTask, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.MacModificationTask + if *v == nil { + sv = make([]types.MacModificationTask, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.MacModificationTask + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentMacModificationTask(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentMacModificationTaskListUnwrapped(v *[]types.MacModificationTask, decoder smithyxml.NodeDecoder) error { + var sv []types.MacModificationTask + if *v == nil { + sv = make([]types.MacModificationTask, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.MacModificationTask + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentMacModificationTask(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentMacOSVersionStringList(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -112169,6 +115938,146 @@ func awsEc2query_deserializeDocumentMacOSVersionStringListUnwrapped(v *[]string, *v = sv return nil } +func awsEc2query_deserializeDocumentMacSystemIntegrityProtectionConfiguration(v **types.MacSystemIntegrityProtectionConfiguration, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MacSystemIntegrityProtectionConfiguration + if *v == nil { + sv = 
&types.MacSystemIntegrityProtectionConfiguration{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("appleInternal", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AppleInternal = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("baseSystem", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BaseSystem = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("debuggingRestrictions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DebuggingRestrictions = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("dTraceRestrictions", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DTraceRestrictions = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("filesystemProtections", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FilesystemProtections = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("kextSigning", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KextSigning = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("nvramProtections", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NvramProtections = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + case strings.EqualFold("status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.MacSystemIntegrityProtectionSettingStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentMaintenanceDetails(v **types.MaintenanceDetails, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -115993,6 +119902,12 @@ func awsEc2query_deserializeDocumentNetworkInterface(v **types.NetworkInterface, originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("associatedSubnetSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentAssociatedSubnetList(&sv.AssociatedSubnets, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("association", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentNetworkInterfaceAssociation(&sv.Association, nodeDecoder); err != nil { @@ -116209,6 +120124,25 @@ func awsEc2query_deserializeDocumentNetworkInterface(v **types.NetworkInterface, 
return err } + case strings.EqualFold("publicDnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicDnsName = ptr.String(xtv) + } + + case strings.EqualFold("publicIpDnsNameOptions", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentPublicIpDnsNameOptions(&sv.PublicIpDnsNameOptions, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("requesterId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -116819,6 +120753,19 @@ func awsEc2query_deserializeDocumentNetworkInterfaceIpv6Address(v **types.Networ sv.IsPrimaryIpv6 = ptr.Bool(xtv) } + case strings.EqualFold("publicIpv6DnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicIpv6DnsName = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -120098,6 +124045,19 @@ func awsEc2query_deserializeDocumentPlacement(v **types.Placement, decoder smith sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("groupId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -123049,6 +127009,94 @@ func awsEc2query_deserializeDocumentPtrUpdateStatus(v **types.PtrUpdateStatus, d return nil } +func awsEc2query_deserializeDocumentPublicIpDnsNameOptions(v **types.PublicIpDnsNameOptions, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PublicIpDnsNameOptions + if *v == nil { + sv = &types.PublicIpDnsNameOptions{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("dnsHostnameType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DnsHostnameType = ptr.String(xtv) + } + + case strings.EqualFold("publicDualStackDnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicDualStackDnsName = ptr.String(xtv) + } + + case strings.EqualFold("publicIpv4DnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicIpv4DnsName = ptr.String(xtv) + } + + case strings.EqualFold("publicIpv6DnsName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicIpv6DnsName = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentPublicIpv4Pool(v **types.PublicIpv4Pool, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -127306,6 +131354,19 @@ func awsEc2query_deserializeDocumentRoute(v 
**types.Route, decoder smithyxml.Nod sv.InstanceOwnerId = ptr.String(xtv) } + case strings.EqualFold("ipAddress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.IpAddress = ptr.String(xtv) + } + case strings.EqualFold("localGatewayId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -127345,6 +131406,19 @@ func awsEc2query_deserializeDocumentRoute(v **types.Route, decoder smithyxml.Nod sv.NetworkInterfaceId = ptr.String(xtv) } + case strings.EqualFold("odbNetworkArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.OdbNetworkArn = ptr.String(xtv) + } + case strings.EqualFold("origin", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -129110,6 +133184,19 @@ func awsEc2query_deserializeDocumentRouteTableAssociation(v **types.RouteTableAs sv.Main = ptr.Bool(xtv) } + case strings.EqualFold("publicIpv4Pool", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PublicIpv4Pool = ptr.String(xtv) + } + case strings.EqualFold("routeTableAssociationId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -131761,6 +135848,19 @@ func awsEc2query_deserializeDocumentSecurityGroupVpcAssociation(v **types.Securi sv.GroupId = ptr.String(xtv) } + case strings.EqualFold("groupOwnerId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.GroupOwnerId = ptr.String(xtv) + } + case strings.EqualFold("state", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -131933,6 +136033,12 @@ func awsEc2query_deserializeDocumentServiceConfiguration(v **types.ServiceConfig sv.AcceptanceRequired = ptr.Bool(xtv) } + case strings.EqualFold("availabilityZoneIdSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentValueStringList(&sv.AvailabilityZoneIds, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("availabilityZoneSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentValueStringList(&sv.AvailabilityZones, nodeDecoder); err != nil { @@ -132204,6 +136310,12 @@ func awsEc2query_deserializeDocumentServiceDetail(v **types.ServiceDetail, decod sv.AcceptanceRequired = ptr.Bool(xtv) } + case strings.EqualFold("availabilityZoneIdSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentValueStringList(&sv.AvailabilityZoneIds, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("availabilityZoneSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentValueStringList(&sv.AvailabilityZones, nodeDecoder); err != nil { @@ -135700,6 +139812,19 @@ func awsEc2query_deserializeDocumentSpotInstanceRequest(v **types.SpotInstanceRe sv.LaunchedAvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("launchedAvailabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.LaunchedAvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("launchGroup", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -136499,6 
+140624,19 @@ func awsEc2query_deserializeDocumentSpotPrice(v **types.SpotPrice, decoder smith sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("instanceType", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -137654,6 +141792,19 @@ func awsEc2query_deserializeDocumentSubnet(v **types.Subnet, decoder smithyxml.N return err } + case strings.EqualFold("type", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Type = ptr.String(xtv) + } + case strings.EqualFold("vpcId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -148359,6 +152510,86 @@ func awsEc2query_deserializeDocumentUserIdGroupPairSetUnwrapped(v *[]types.UserI *v = sv return nil } +func awsEc2query_deserializeDocumentUserIdList(v *[]string, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + decoder = memberDecoder + switch { + case strings.EqualFold("item", t.Name.Local): + var col string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + col = xtv + } + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentUserIdListUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { + var sv []string + if *v == nil { + sv = make([]string, 0) + } else { + sv = *v + } + + switch { + default: + var mv string + t := decoder.StartEl + _ = t + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentValidationError(v **types.ValidationError, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -151994,6 +156225,19 @@ func awsEc2query_deserializeDocumentVolume(v **types.Volume, decoder smithyxml.N sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("createTime", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -153608,6 +157852,12 @@ func awsEc2query_deserializeDocumentVolumeStatusItem(v **types.VolumeStatusItem, return err } + case strings.EqualFold("initializationStatusDetails", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentInitializationStatusDetails(&sv.InitializationStatusDetails, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("outpostArn", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -156576,6 +160826,19 @@ func awsEc2query_deserializeDocumentVpnConnection(v 
**types.VpnConnection, decod return err } + case strings.EqualFold("preSharedKeyArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PreSharedKeyArn = ptr.String(xtv) + } + case strings.EqualFold("routes", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentVpnStaticRouteList(&sv.Routes, nodeDecoder); err != nil { @@ -161043,6 +165306,48 @@ func awsEc2query_deserializeOpDocumentCreateDefaultVpcOutput(v **CreateDefaultVp return nil } +func awsEc2query_deserializeOpDocumentCreateDelegateMacVolumeOwnershipTaskOutput(v **CreateDelegateMacVolumeOwnershipTaskOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateDelegateMacVolumeOwnershipTaskOutput + if *v == nil { + sv = &CreateDelegateMacVolumeOwnershipTaskOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("macModificationTask", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMacModificationTask(&sv.MacModificationTask, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentCreateDhcpOptionsOutput(v **CreateDhcpOptionsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -161373,6 +165678,55 @@ func awsEc2query_deserializeOpDocumentCreateImageOutput(v **CreateImageOutput, d return nil } +func awsEc2query_deserializeOpDocumentCreateImageUsageReportOutput(v **CreateImageUsageReportOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateImageUsageReportOutput + if *v == nil { + sv = &CreateImageUsageReportOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("reportId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ReportId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentCreateInstanceConnectEndpointOutput(v **CreateInstanceConnectEndpointOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -162206,6 +166560,48 @@ func awsEc2query_deserializeOpDocumentCreateLocalGatewayVirtualInterfaceOutput(v return nil } +func awsEc2query_deserializeOpDocumentCreateMacSystemIntegrityProtectionModificationTaskOutput(v **CreateMacSystemIntegrityProtectionModificationTaskOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv 
*CreateMacSystemIntegrityProtectionModificationTaskOutput + if *v == nil { + sv = &CreateMacSystemIntegrityProtectionModificationTaskOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("macModificationTask", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMacModificationTask(&sv.MacModificationTask, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentCreateManagedPrefixListOutput(v **CreateManagedPrefixListOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -164533,6 +168929,19 @@ func awsEc2query_deserializeOpDocumentCreateVolumeOutput(v **CreateVolumeOutput, sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("availabilityZoneId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AvailabilityZoneId = ptr.String(xtv) + } + case strings.EqualFold("createTime", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -165562,6 +169971,58 @@ func awsEc2query_deserializeOpDocumentDeleteFpgaImageOutput(v **DeleteFpgaImageO return nil } +func awsEc2query_deserializeOpDocumentDeleteImageUsageReportOutput(v **DeleteImageUsageReportOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeleteImageUsageReportOutput + if *v == nil { + sv = &DeleteImageUsageReportOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("return", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.Return = ptr.Bool(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDeleteInstanceConnectEndpointOutput(v **DeleteInstanceConnectEndpointOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -168184,6 +172645,28 @@ func awsEc2query_deserializeOpDocumentDeregisterImageOutput(v **DeregisterImageO originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("deleteSnapshotResultSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentDeleteSnapshotResultSet(&sv.DeleteSnapshotResults, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("return", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + 
xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.Return = ptr.Bool(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -168935,6 +173418,116 @@ func awsEc2query_deserializeOpDocumentDescribeCapacityBlockOfferingsOutput(v **D return nil } +func awsEc2query_deserializeOpDocumentDescribeCapacityBlocksOutput(v **DescribeCapacityBlocksOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeCapacityBlocksOutput + if *v == nil { + sv = &DescribeCapacityBlocksOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("capacityBlockSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityBlockSet(&sv.CapacityBlocks, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeOpDocumentDescribeCapacityBlockStatusOutput(v **DescribeCapacityBlockStatusOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeCapacityBlockStatusOutput + if *v == nil { + sv = &DescribeCapacityBlockStatusOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("capacityBlockStatusSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityBlockStatusSet(&sv.CapacityBlockStatuses, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDescribeCapacityReservationBillingRequestsOutput(v **DescribeCapacityReservationBillingRequestsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -170876,6 +175469,61 @@ func awsEc2query_deserializeOpDocumentDescribeImageAttributeOutput(v **DescribeI return nil } +func awsEc2query_deserializeOpDocumentDescribeImageReferencesOutput(v **DescribeImageReferencesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeImageReferencesOutput + if *v == nil { + sv = &DescribeImageReferencesOutput{} + } else { + sv = *v + } + + for { + t, done, 
err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("imageReferenceSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageReferenceList(&sv.ImageReferences, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -170931,6 +175579,116 @@ func awsEc2query_deserializeOpDocumentDescribeImagesOutput(v **DescribeImagesOut return nil } +func awsEc2query_deserializeOpDocumentDescribeImageUsageReportEntriesOutput(v **DescribeImageUsageReportEntriesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeImageUsageReportEntriesOutput + if *v == nil { + sv = &DescribeImageUsageReportEntriesOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("imageUsageReportEntrySet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageUsageReportEntryList(&sv.ImageUsageReportEntries, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeOpDocumentDescribeImageUsageReportsOutput(v **DescribeImageUsageReportsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeImageUsageReportsOutput + if *v == nil { + sv = &DescribeImageUsageReportsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("imageUsageReportSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentImageUsageReportList(&sv.ImageUsageReports, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != 
nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDescribeImportImageTasksOutput(v **DescribeImportImageTasksOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -172480,13 +177238,123 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVirtualInter return nil } -func awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsOutput(v **DescribeLocalGatewayRouteTableVpcAssociationsOutput, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsOutput(v **DescribeLocalGatewayRouteTableVpcAssociationsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeLocalGatewayRouteTableVpcAssociationsOutput + if *v == nil { + sv = &DescribeLocalGatewayRouteTableVpcAssociationsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("localGatewayRouteTableVpcAssociationSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentLocalGatewayRouteTableVpcAssociationSet(&sv.LocalGatewayRouteTableVpcAssociations, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(v **DescribeLocalGatewaysOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeLocalGatewaysOutput + if *v == nil { + sv = &DescribeLocalGatewaysOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("localGatewaySet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentLocalGatewaySet(&sv.LocalGateways, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("nextToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NextToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsOutput(v **DescribeLocalGatewayVirtualInterfaceGroupsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeLocalGatewayRouteTableVpcAssociationsOutput + var sv *DescribeLocalGatewayVirtualInterfaceGroupsOutput if *v == nil { - sv = 
&DescribeLocalGatewayRouteTableVpcAssociationsOutput{} + sv = &DescribeLocalGatewayVirtualInterfaceGroupsOutput{} } else { sv = *v } @@ -172502,9 +177370,9 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociati originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("localGatewayRouteTableVpcAssociationSet", t.Name.Local): + case strings.EqualFold("localGatewayVirtualInterfaceGroupSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentLocalGatewayRouteTableVpcAssociationSet(&sv.LocalGatewayRouteTableVpcAssociations, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentLocalGatewayVirtualInterfaceGroupSet(&sv.LocalGatewayVirtualInterfaceGroups, nodeDecoder); err != nil { return err } @@ -172535,13 +177403,13 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociati return nil } -func awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(v **DescribeLocalGatewaysOutput, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfacesOutput(v **DescribeLocalGatewayVirtualInterfacesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeLocalGatewaysOutput + var sv *DescribeLocalGatewayVirtualInterfacesOutput if *v == nil { - sv = &DescribeLocalGatewaysOutput{} + sv = &DescribeLocalGatewayVirtualInterfacesOutput{} } else { sv = *v } @@ -172557,9 +177425,9 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(v **DescribeLo originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("localGatewaySet", t.Name.Local): + case strings.EqualFold("localGatewayVirtualInterfaceSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentLocalGatewaySet(&sv.LocalGateways, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentLocalGatewayVirtualInterfaceSet(&sv.LocalGatewayVirtualInterfaces, nodeDecoder); err != nil { return err } @@ -172590,13 +177458,13 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewaysOutput(v **DescribeLo return nil } -func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsOutput(v **DescribeLocalGatewayVirtualInterfaceGroupsOutput, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **DescribeLockedSnapshotsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeLocalGatewayVirtualInterfaceGroupsOutput + var sv *DescribeLockedSnapshotsOutput if *v == nil { - sv = &DescribeLocalGatewayVirtualInterfaceGroupsOutput{} + sv = &DescribeLockedSnapshotsOutput{} } else { sv = *v } @@ -172612,12 +177480,6 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroups originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("localGatewayVirtualInterfaceGroupSet", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentLocalGatewayVirtualInterfaceGroupSet(&sv.LocalGatewayVirtualInterfaceGroups, nodeDecoder); err != nil { - return err - } - case strings.EqualFold("nextToken", 
t.Name.Local): val, err := decoder.Value() if err != nil { @@ -172631,60 +177493,11 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroups sv.NextToken = ptr.String(xtv) } - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfacesOutput(v **DescribeLocalGatewayVirtualInterfacesOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DescribeLocalGatewayVirtualInterfacesOutput - if *v == nil { - sv = &DescribeLocalGatewayVirtualInterfacesOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("localGatewayVirtualInterfaceSet", t.Name.Local): + case strings.EqualFold("snapshotSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentLocalGatewayVirtualInterfaceSet(&sv.LocalGatewayVirtualInterfaces, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("nextToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { + if err := awsEc2query_deserializeDocumentLockedSnapshotsInfoList(&sv.Snapshots, nodeDecoder); err != nil { return err } - if val == nil { - break - } - { - xtv := string(val) - sv.NextToken = ptr.String(xtv) - } default: // Do nothing and ignore the unexpected tag element @@ -172700,13 +177513,13 @@ func awsEc2query_deserializeOpDocumentDescribeLocalGatewayVirtualInterfacesOutpu return nil } -func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **DescribeLockedSnapshotsOutput, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(v **DescribeMacHostsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeLockedSnapshotsOutput + var sv *DescribeMacHostsOutput if *v == nil { - sv = &DescribeLockedSnapshotsOutput{} + sv = &DescribeMacHostsOutput{} } else { sv = *v } @@ -172722,6 +177535,12 @@ func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **Describe originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("macHostSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentMacHostList(&sv.MacHosts, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("nextToken", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -172735,12 +177554,6 @@ func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **Describe sv.NextToken = ptr.String(xtv) } - case strings.EqualFold("snapshotSet", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentLockedSnapshotsInfoList(&sv.Snapshots, nodeDecoder); err != nil { - return err - } - default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -172755,13 +177568,13 @@ func awsEc2query_deserializeOpDocumentDescribeLockedSnapshotsOutput(v **Describe return nil } -func 
awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(v **DescribeMacHostsOutput, decoder smithyxml.NodeDecoder) error { +func awsEc2query_deserializeOpDocumentDescribeMacModificationTasksOutput(v **DescribeMacModificationTasksOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeMacHostsOutput + var sv *DescribeMacModificationTasksOutput if *v == nil { - sv = &DescribeMacHostsOutput{} + sv = &DescribeMacModificationTasksOutput{} } else { sv = *v } @@ -172777,9 +177590,9 @@ func awsEc2query_deserializeOpDocumentDescribeMacHostsOutput(v **DescribeMacHost originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("macHostSet", t.Name.Local): + case strings.EqualFold("macModificationTaskSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsEc2query_deserializeDocumentMacHostList(&sv.MacHosts, nodeDecoder); err != nil { + if err := awsEc2query_deserializeDocumentMacModificationTaskList(&sv.MacModificationTasks, nodeDecoder); err != nil { return err } @@ -180877,6 +185690,48 @@ func awsEc2query_deserializeOpDocumentExportVerifiedAccessInstanceClientConfigur return nil } +func awsEc2query_deserializeOpDocumentGetActiveVpnTunnelStatusOutput(v **GetActiveVpnTunnelStatusOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetActiveVpnTunnelStatusOutput + if *v == nil { + sv = &GetActiveVpnTunnelStatusOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("activeVpnTunnelStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentActiveVpnTunnelStatus(&sv.ActiveVpnTunnelStatus, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentGetAllowedImagesSettingsOutput(v **GetAllowedImagesSettingsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -185694,6 +190549,58 @@ func awsEc2query_deserializeOpDocumentModifyInstanceCapacityReservationAttribute return nil } +func awsEc2query_deserializeOpDocumentModifyInstanceConnectEndpointOutput(v **ModifyInstanceConnectEndpointOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ModifyInstanceConnectEndpointOutput + if *v == nil { + sv = &ModifyInstanceConnectEndpointOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("return", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.Return = ptr.Bool(xtv) + } + + default: + // Do nothing and ignore the 
unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentModifyInstanceCpuOptionsOutput(v **ModifyInstanceCpuOptionsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -185957,6 +190864,19 @@ func awsEc2query_deserializeOpDocumentModifyInstanceMaintenanceOptionsOutput(v * sv.InstanceId = ptr.String(xtv) } + case strings.EqualFold("rebootMigration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.RebootMigration = types.InstanceRebootMigrationState(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -186580,6 +191500,58 @@ func awsEc2query_deserializeOpDocumentModifyPrivateDnsNameOptionsOutput(v **Modi return nil } +func awsEc2query_deserializeOpDocumentModifyPublicIpDnsNameOptionsOutput(v **ModifyPublicIpDnsNameOptionsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ModifyPublicIpDnsNameOptionsOutput + if *v == nil { + sv = &ModifyPublicIpDnsNameOptionsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("successful", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.Successful = ptr.Bool(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentModifyReservedInstancesOutput(v **ModifyReservedInstancesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -188561,6 +193533,12 @@ func awsEc2query_deserializeOpDocumentPurchaseCapacityBlockOutput(v **PurchaseCa originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("capacityBlockSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentCapacityBlockSet(&sv.CapacityBlocks, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("capacityReservation", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentCapacityReservation(&sv.CapacityReservation, nodeDecoder); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json index 71b1da91e..9c52a12e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json @@ -82,12 +82,14 @@ "api_op_CreateCustomerGateway.go", "api_op_CreateDefaultSubnet.go", "api_op_CreateDefaultVpc.go", + "api_op_CreateDelegateMacVolumeOwnershipTask.go", "api_op_CreateDhcpOptions.go", "api_op_CreateEgressOnlyInternetGateway.go", "api_op_CreateFleet.go", 
"api_op_CreateFlowLogs.go", "api_op_CreateFpgaImage.go", "api_op_CreateImage.go", + "api_op_CreateImageUsageReport.go", "api_op_CreateInstanceConnectEndpoint.go", "api_op_CreateInstanceEventWindow.go", "api_op_CreateInstanceExportTask.go", @@ -106,6 +108,7 @@ "api_op_CreateLocalGatewayRouteTableVpcAssociation.go", "api_op_CreateLocalGatewayVirtualInterface.go", "api_op_CreateLocalGatewayVirtualInterfaceGroup.go", + "api_op_CreateMacSystemIntegrityProtectionModificationTask.go", "api_op_CreateManagedPrefixList.go", "api_op_CreateNatGateway.go", "api_op_CreateNetworkAcl.go", @@ -172,6 +175,7 @@ "api_op_DeleteFleets.go", "api_op_DeleteFlowLogs.go", "api_op_DeleteFpgaImage.go", + "api_op_DeleteImageUsageReport.go", "api_op_DeleteInstanceConnectEndpoint.go", "api_op_DeleteInstanceEventWindow.go", "api_op_DeleteInternetGateway.go", @@ -262,6 +266,8 @@ "api_op_DescribeCapacityBlockExtensionHistory.go", "api_op_DescribeCapacityBlockExtensionOfferings.go", "api_op_DescribeCapacityBlockOfferings.go", + "api_op_DescribeCapacityBlockStatus.go", + "api_op_DescribeCapacityBlocks.go", "api_op_DescribeCapacityReservationBillingRequests.go", "api_op_DescribeCapacityReservationFleets.go", "api_op_DescribeCapacityReservations.go", @@ -296,6 +302,9 @@ "api_op_DescribeIdFormat.go", "api_op_DescribeIdentityIdFormat.go", "api_op_DescribeImageAttribute.go", + "api_op_DescribeImageReferences.go", + "api_op_DescribeImageUsageReportEntries.go", + "api_op_DescribeImageUsageReports.go", "api_op_DescribeImages.go", "api_op_DescribeImportImageTasks.go", "api_op_DescribeImportSnapshotTasks.go", @@ -330,6 +339,7 @@ "api_op_DescribeLocalGateways.go", "api_op_DescribeLockedSnapshots.go", "api_op_DescribeMacHosts.go", + "api_op_DescribeMacModificationTasks.go", "api_op_DescribeManagedPrefixLists.go", "api_op_DescribeMovingAddresses.go", "api_op_DescribeNatGateways.go", @@ -483,6 +493,7 @@ "api_op_ExportImage.go", "api_op_ExportTransitGatewayRoutes.go", "api_op_ExportVerifiedAccessInstanceClientConfiguration.go", + "api_op_GetActiveVpnTunnelStatus.go", "api_op_GetAllowedImagesSettings.go", "api_op_GetAssociatedEnclaveCertificateIamRoles.go", "api_op_GetAssociatedIpv6PoolCidrs.go", @@ -562,6 +573,7 @@ "api_op_ModifyImageAttribute.go", "api_op_ModifyInstanceAttribute.go", "api_op_ModifyInstanceCapacityReservationAttributes.go", + "api_op_ModifyInstanceConnectEndpoint.go", "api_op_ModifyInstanceCpuOptions.go", "api_op_ModifyInstanceCreditSpecification.go", "api_op_ModifyInstanceEventStartTime.go", @@ -581,6 +593,7 @@ "api_op_ModifyManagedPrefixList.go", "api_op_ModifyNetworkInterfaceAttribute.go", "api_op_ModifyPrivateDnsNameOptions.go", + "api_op_ModifyPublicIpDnsNameOptions.go", "api_op_ModifyReservedInstances.go", "api_op_ModifyRouteServer.go", "api_op_ModifySecurityGroupRules.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go index 62604edc1..ab33eb290 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go @@ -3,4 +3,4 @@ package ec2 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.218.0" +const goModuleVersion = "1.249.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints/endpoints.go index c433d7c89..e88864c86 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints/endpoints.go @@ -159,6 +159,9 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "ec2.ap-east-1.api.aws", }, + endpoints.EndpointKey{ + Region: "ap-east-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-northeast-1", }: endpoints.Endpoint{}, @@ -219,6 +222,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-5", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-6", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-southeast-7", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go index 82566bc8b..3c934b008 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/options.go @@ -123,12 +123,18 @@ type Options struct { // implementation if nil. HTTPClient HTTPClient + // Client registry of operation interceptors. + Interceptors smithyhttp.InterceptorRegistry + // The auth scheme resolver which determines how to authenticate for each // operation. AuthSchemeResolver AuthSchemeResolver // The list of auth schemes supported by the client. AuthSchemes []smithyhttp.AuthScheme + + // Priority list of preferred auth scheme names (e.g. sigv4a). + AuthSchemePreference []string } // Copy creates a clone where the APIOptions list is deep copied. @@ -136,6 +142,7 @@ func (o Options) Copy() Options { to := o to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) copy(to.APIOptions, o.APIOptions) + to.Interceptors = o.Interceptors.Copy() return to } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go index 89eff46b3..116e11217 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go @@ -4988,6 +4988,76 @@ func (m *awsEc2query_serializeOpCreateDefaultVpc) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } +type awsEc2query_serializeOpCreateDelegateMacVolumeOwnershipTask struct { +} + +func (*awsEc2query_serializeOpCreateDelegateMacVolumeOwnershipTask) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpCreateDelegateMacVolumeOwnershipTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDelegateMacVolumeOwnershipTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("CreateDelegateMacVolumeOwnershipTask") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentCreateDelegateMacVolumeOwnershipTaskInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsEc2query_serializeOpCreateDhcpOptions struct { } @@ -5408,6 +5478,76 @@ func (m *awsEc2query_serializeOpCreateImage) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } +type awsEc2query_serializeOpCreateImageUsageReport struct { +} + +func (*awsEc2query_serializeOpCreateImageUsageReport) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpCreateImageUsageReport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateImageUsageReportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("CreateImageUsageReport") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentCreateImageUsageReportInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = 
bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsEc2query_serializeOpCreateInstanceConnectEndpoint struct { } @@ -6668,6 +6808,76 @@ func (m *awsEc2query_serializeOpCreateLocalGatewayVirtualInterfaceGroup) HandleS return next.HandleSerialize(ctx, in) } +type awsEc2query_serializeOpCreateMacSystemIntegrityProtectionModificationTask struct { +} + +func (*awsEc2query_serializeOpCreateMacSystemIntegrityProtectionModificationTask) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpCreateMacSystemIntegrityProtectionModificationTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateMacSystemIntegrityProtectionModificationTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("CreateMacSystemIntegrityProtectionModificationTask") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentCreateMacSystemIntegrityProtectionModificationTaskInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + type awsEc2query_serializeOpCreateManagedPrefixList struct { } @@ -11288,14 +11498,14 @@ func (m *awsEc2query_serializeOpDeleteFpgaImage) 
HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteInstanceConnectEndpoint struct { +type awsEc2query_serializeOpDeleteImageUsageReport struct { } -func (*awsEc2query_serializeOpDeleteInstanceConnectEndpoint) ID() string { +func (*awsEc2query_serializeOpDeleteImageUsageReport) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteInstanceConnectEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteImageUsageReport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11307,7 +11517,7 @@ func (m *awsEc2query_serializeOpDeleteInstanceConnectEndpoint) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteInstanceConnectEndpointInput) + input, ok := in.Parameters.(*DeleteImageUsageReportInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11332,10 +11542,10 @@ func (m *awsEc2query_serializeOpDeleteInstanceConnectEndpoint) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteInstanceConnectEndpoint") + body.Key("Action").String("DeleteImageUsageReport") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteInstanceConnectEndpointInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteImageUsageReportInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -11358,14 +11568,14 @@ func (m *awsEc2query_serializeOpDeleteInstanceConnectEndpoint) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteInstanceEventWindow struct { +type awsEc2query_serializeOpDeleteInstanceConnectEndpoint struct { } -func (*awsEc2query_serializeOpDeleteInstanceEventWindow) ID() string { +func (*awsEc2query_serializeOpDeleteInstanceConnectEndpoint) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteInstanceConnectEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11377,7 +11587,7 @@ func (m *awsEc2query_serializeOpDeleteInstanceEventWindow) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteInstanceEventWindowInput) + input, ok := in.Parameters.(*DeleteInstanceConnectEndpointInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11402,10 +11612,10 @@ func (m *awsEc2query_serializeOpDeleteInstanceEventWindow) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteInstanceEventWindow") + body.Key("Action").String("DeleteInstanceConnectEndpoint") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteInstanceConnectEndpointInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -11428,14 +11638,14 @@ func (m *awsEc2query_serializeOpDeleteInstanceEventWindow) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteInternetGateway struct { +type awsEc2query_serializeOpDeleteInstanceEventWindow struct { } -func (*awsEc2query_serializeOpDeleteInternetGateway) ID() string { +func (*awsEc2query_serializeOpDeleteInstanceEventWindow) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteInternetGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11447,7 +11657,7 @@ func (m *awsEc2query_serializeOpDeleteInternetGateway) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteInternetGatewayInput) + input, ok := in.Parameters.(*DeleteInstanceEventWindowInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11472,10 +11682,10 @@ func (m *awsEc2query_serializeOpDeleteInternetGateway) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteInternetGateway") + body.Key("Action").String("DeleteInstanceEventWindow") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteInternetGatewayInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -11498,14 +11708,14 @@ func (m *awsEc2query_serializeOpDeleteInternetGateway) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteIpam struct { +type awsEc2query_serializeOpDeleteInternetGateway struct { } -func (*awsEc2query_serializeOpDeleteIpam) ID() string { +func (*awsEc2query_serializeOpDeleteInternetGateway) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteIpam) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteInternetGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11517,7 +11727,7 @@ func (m *awsEc2query_serializeOpDeleteIpam) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteIpamInput) + input, ok := in.Parameters.(*DeleteInternetGatewayInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11542,10 +11752,10 @@ func (m *awsEc2query_serializeOpDeleteIpam) HandleSerialize(ctx context.Context, bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteIpam") + body.Key("Action").String("DeleteInternetGateway") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteIpamInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteInternetGatewayInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -11568,14 +11778,14 @@ func (m *awsEc2query_serializeOpDeleteIpam) HandleSerialize(ctx context.Context, return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken struct { +type awsEc2query_serializeOpDeleteIpam struct { } -func (*awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) ID() string { +func (*awsEc2query_serializeOpDeleteIpam) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteIpam) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11587,7 +11797,7 @@ func (m *awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteIpamExternalResourceVerificationTokenInput) + input, ok := in.Parameters.(*DeleteIpamInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11612,10 +11822,10 @@ func (m *awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteIpamExternalResourceVerificationToken") + body.Key("Action").String("DeleteIpam") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteIpamExternalResourceVerificationTokenInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteIpamInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -11638,14 +11848,14 @@ func (m *awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDeleteIpamPool struct { +type awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken struct { } -func (*awsEc2query_serializeOpDeleteIpamPool) ID() string { +func (*awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDeleteIpamPool) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDeleteIpamExternalResourceVerificationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -11657,7 +11867,7 @@ func (m *awsEc2query_serializeOpDeleteIpamPool) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DeleteIpamPoolInput) + input, ok := in.Parameters.(*DeleteIpamExternalResourceVerificationTokenInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -11682,10 +11892,80 @@ func (m *awsEc2query_serializeOpDeleteIpamPool) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DeleteIpamPool") + body.Key("Action").String("DeleteIpamExternalResourceVerificationToken") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDeleteIpamPoolInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDeleteIpamExternalResourceVerificationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpDeleteIpamPool struct { +} + +func (*awsEc2query_serializeOpDeleteIpamPool) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpDeleteIpamPool) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteIpamPoolInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DeleteIpamPool") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentDeleteIpamPoolInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17588,14 +17868,14 @@ func (m *awsEc2query_serializeOpDescribeCapacityBlockOfferings) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCapacityReservationBillingRequests struct { +type awsEc2query_serializeOpDescribeCapacityBlocks struct { } -func (*awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) ID() string { +func (*awsEc2query_serializeOpDescribeCapacityBlocks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCapacityBlocks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17607,7 +17887,7 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCapacityReservationBillingRequestsInput) + input, ok := in.Parameters.(*DescribeCapacityBlocksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17632,10 +17912,10 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) Hand bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCapacityReservationBillingRequests") + body.Key("Action").String("DescribeCapacityBlocks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationBillingRequestsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCapacityBlocksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17658,14 +17938,14 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) Hand return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCapacityReservationFleets struct { +type awsEc2query_serializeOpDescribeCapacityBlockStatus struct { } -func (*awsEc2query_serializeOpDescribeCapacityReservationFleets) ID() string { +func (*awsEc2query_serializeOpDescribeCapacityBlockStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCapacityReservationFleets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCapacityBlockStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17677,7 +17957,7 @@ 
func (m *awsEc2query_serializeOpDescribeCapacityReservationFleets) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCapacityReservationFleetsInput) + input, ok := in.Parameters.(*DescribeCapacityBlockStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17702,10 +17982,10 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservationFleets) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCapacityReservationFleets") + body.Key("Action").String("DescribeCapacityBlockStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationFleetsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCapacityBlockStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17728,14 +18008,14 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservationFleets) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCapacityReservations struct { +type awsEc2query_serializeOpDescribeCapacityReservationBillingRequests struct { } -func (*awsEc2query_serializeOpDescribeCapacityReservations) ID() string { +func (*awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCapacityReservations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCapacityReservationBillingRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17747,7 +18027,7 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservations) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCapacityReservationsInput) + input, ok := in.Parameters.(*DescribeCapacityReservationBillingRequestsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17772,10 +18052,10 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservations) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCapacityReservations") + body.Key("Action").String("DescribeCapacityReservationBillingRequests") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationBillingRequestsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17798,14 +18078,14 @@ func (m *awsEc2query_serializeOpDescribeCapacityReservations) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCarrierGateways struct { +type 
awsEc2query_serializeOpDescribeCapacityReservationFleets struct { } -func (*awsEc2query_serializeOpDescribeCarrierGateways) ID() string { +func (*awsEc2query_serializeOpDescribeCapacityReservationFleets) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCarrierGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCapacityReservationFleets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17817,7 +18097,7 @@ func (m *awsEc2query_serializeOpDescribeCarrierGateways) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCarrierGatewaysInput) + input, ok := in.Parameters.(*DescribeCapacityReservationFleetsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17842,10 +18122,10 @@ func (m *awsEc2query_serializeOpDescribeCarrierGateways) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCarrierGateways") + body.Key("Action").String("DescribeCapacityReservationFleets") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCarrierGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationFleetsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17868,14 +18148,14 @@ func (m *awsEc2query_serializeOpDescribeCarrierGateways) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClassicLinkInstances struct { +type awsEc2query_serializeOpDescribeCapacityReservations struct { } -func (*awsEc2query_serializeOpDescribeClassicLinkInstances) ID() string { +func (*awsEc2query_serializeOpDescribeCapacityReservations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClassicLinkInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCapacityReservations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17887,7 +18167,7 @@ func (m *awsEc2query_serializeOpDescribeClassicLinkInstances) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClassicLinkInstancesInput) + input, ok := in.Parameters.(*DescribeCapacityReservationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17912,10 +18192,10 @@ func (m *awsEc2query_serializeOpDescribeClassicLinkInstances) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClassicLinkInstances") 
+ body.Key("Action").String("DescribeCapacityReservations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClassicLinkInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCapacityReservationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -17938,14 +18218,14 @@ func (m *awsEc2query_serializeOpDescribeClassicLinkInstances) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClientVpnAuthorizationRules struct { +type awsEc2query_serializeOpDescribeCarrierGateways struct { } -func (*awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) ID() string { +func (*awsEc2query_serializeOpDescribeCarrierGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCarrierGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -17957,7 +18237,7 @@ func (m *awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClientVpnAuthorizationRulesInput) + input, ok := in.Parameters.(*DescribeCarrierGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -17982,10 +18262,10 @@ func (m *awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClientVpnAuthorizationRules") + body.Key("Action").String("DescribeCarrierGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClientVpnAuthorizationRulesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCarrierGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18008,14 +18288,14 @@ func (m *awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClientVpnConnections struct { +type awsEc2query_serializeOpDescribeClassicLinkInstances struct { } -func (*awsEc2query_serializeOpDescribeClientVpnConnections) ID() string { +func (*awsEc2query_serializeOpDescribeClassicLinkInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClientVpnConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClassicLinkInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18027,7 +18307,7 @@ func (m *awsEc2query_serializeOpDescribeClientVpnConnections) HandleSerialize(ct return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClientVpnConnectionsInput) + input, ok := in.Parameters.(*DescribeClassicLinkInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18052,10 +18332,10 @@ func (m *awsEc2query_serializeOpDescribeClientVpnConnections) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClientVpnConnections") + body.Key("Action").String("DescribeClassicLinkInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClientVpnConnectionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeClassicLinkInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18078,14 +18358,14 @@ func (m *awsEc2query_serializeOpDescribeClientVpnConnections) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClientVpnEndpoints struct { +type awsEc2query_serializeOpDescribeClientVpnAuthorizationRules struct { } -func (*awsEc2query_serializeOpDescribeClientVpnEndpoints) ID() string { +func (*awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClientVpnEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClientVpnAuthorizationRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18097,7 +18377,7 @@ func (m *awsEc2query_serializeOpDescribeClientVpnEndpoints) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClientVpnEndpointsInput) + input, ok := in.Parameters.(*DescribeClientVpnAuthorizationRulesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18122,10 +18402,10 @@ func (m *awsEc2query_serializeOpDescribeClientVpnEndpoints) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClientVpnEndpoints") + body.Key("Action").String("DescribeClientVpnAuthorizationRules") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClientVpnEndpointsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeClientVpnAuthorizationRulesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18148,14 +18428,14 @@ func (m *awsEc2query_serializeOpDescribeClientVpnEndpoints) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClientVpnRoutes struct { +type awsEc2query_serializeOpDescribeClientVpnConnections struct { } -func (*awsEc2query_serializeOpDescribeClientVpnRoutes) ID() string { +func (*awsEc2query_serializeOpDescribeClientVpnConnections) ID() string 
{ return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClientVpnRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClientVpnConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18167,7 +18447,7 @@ func (m *awsEc2query_serializeOpDescribeClientVpnRoutes) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClientVpnRoutesInput) + input, ok := in.Parameters.(*DescribeClientVpnConnectionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18192,10 +18472,10 @@ func (m *awsEc2query_serializeOpDescribeClientVpnRoutes) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClientVpnRoutes") + body.Key("Action").String("DescribeClientVpnConnections") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClientVpnRoutesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeClientVpnConnectionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18218,14 +18498,14 @@ func (m *awsEc2query_serializeOpDescribeClientVpnRoutes) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeClientVpnTargetNetworks struct { +type awsEc2query_serializeOpDescribeClientVpnEndpoints struct { } -func (*awsEc2query_serializeOpDescribeClientVpnTargetNetworks) ID() string { +func (*awsEc2query_serializeOpDescribeClientVpnEndpoints) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeClientVpnTargetNetworks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClientVpnEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18237,7 +18517,7 @@ func (m *awsEc2query_serializeOpDescribeClientVpnTargetNetworks) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeClientVpnTargetNetworksInput) + input, ok := in.Parameters.(*DescribeClientVpnEndpointsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18262,10 +18542,10 @@ func (m *awsEc2query_serializeOpDescribeClientVpnTargetNetworks) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeClientVpnTargetNetworks") + body.Key("Action").String("DescribeClientVpnEndpoints") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeClientVpnTargetNetworksInput(input, bodyEncoder.Value); err != nil { + if err 
:= awsEc2query_serializeOpDocumentDescribeClientVpnEndpointsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18288,14 +18568,14 @@ func (m *awsEc2query_serializeOpDescribeClientVpnTargetNetworks) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCoipPools struct { +type awsEc2query_serializeOpDescribeClientVpnRoutes struct { } -func (*awsEc2query_serializeOpDescribeCoipPools) ID() string { +func (*awsEc2query_serializeOpDescribeClientVpnRoutes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCoipPools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClientVpnRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18307,7 +18587,7 @@ func (m *awsEc2query_serializeOpDescribeCoipPools) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCoipPoolsInput) + input, ok := in.Parameters.(*DescribeClientVpnRoutesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18332,10 +18612,10 @@ func (m *awsEc2query_serializeOpDescribeCoipPools) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCoipPools") + body.Key("Action").String("DescribeClientVpnRoutes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCoipPoolsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeClientVpnRoutesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18358,14 +18638,14 @@ func (m *awsEc2query_serializeOpDescribeCoipPools) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeConversionTasks struct { +type awsEc2query_serializeOpDescribeClientVpnTargetNetworks struct { } -func (*awsEc2query_serializeOpDescribeConversionTasks) ID() string { +func (*awsEc2query_serializeOpDescribeClientVpnTargetNetworks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeConversionTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeClientVpnTargetNetworks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18377,7 +18657,7 @@ func (m *awsEc2query_serializeOpDescribeConversionTasks) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeConversionTasksInput) + input, ok := in.Parameters.(*DescribeClientVpnTargetNetworksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", 
in.Parameters)} @@ -18402,10 +18682,10 @@ func (m *awsEc2query_serializeOpDescribeConversionTasks) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeConversionTasks") + body.Key("Action").String("DescribeClientVpnTargetNetworks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeConversionTasksInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeClientVpnTargetNetworksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18428,14 +18708,14 @@ func (m *awsEc2query_serializeOpDescribeConversionTasks) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeCustomerGateways struct { +type awsEc2query_serializeOpDescribeCoipPools struct { } -func (*awsEc2query_serializeOpDescribeCustomerGateways) ID() string { +func (*awsEc2query_serializeOpDescribeCoipPools) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeCustomerGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCoipPools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18447,7 +18727,7 @@ func (m *awsEc2query_serializeOpDescribeCustomerGateways) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeCustomerGatewaysInput) + input, ok := in.Parameters.(*DescribeCoipPoolsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18472,10 +18752,10 @@ func (m *awsEc2query_serializeOpDescribeCustomerGateways) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeCustomerGateways") + body.Key("Action").String("DescribeCoipPools") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeCustomerGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCoipPoolsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18498,14 +18778,14 @@ func (m *awsEc2query_serializeOpDescribeCustomerGateways) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeDeclarativePoliciesReports struct { +type awsEc2query_serializeOpDescribeConversionTasks struct { } -func (*awsEc2query_serializeOpDescribeDeclarativePoliciesReports) ID() string { +func (*awsEc2query_serializeOpDescribeConversionTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeDeclarativePoliciesReports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeConversionTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, 
span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18517,7 +18797,7 @@ func (m *awsEc2query_serializeOpDescribeDeclarativePoliciesReports) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeDeclarativePoliciesReportsInput) + input, ok := in.Parameters.(*DescribeConversionTasksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18542,10 +18822,10 @@ func (m *awsEc2query_serializeOpDescribeDeclarativePoliciesReports) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeDeclarativePoliciesReports") + body.Key("Action").String("DescribeConversionTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeDeclarativePoliciesReportsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeConversionTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18568,14 +18848,14 @@ func (m *awsEc2query_serializeOpDescribeDeclarativePoliciesReports) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeDhcpOptions struct { +type awsEc2query_serializeOpDescribeCustomerGateways struct { } -func (*awsEc2query_serializeOpDescribeDhcpOptions) ID() string { +func (*awsEc2query_serializeOpDescribeCustomerGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeDhcpOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeCustomerGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18587,7 +18867,7 @@ func (m *awsEc2query_serializeOpDescribeDhcpOptions) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeDhcpOptionsInput) + input, ok := in.Parameters.(*DescribeCustomerGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18612,10 +18892,10 @@ func (m *awsEc2query_serializeOpDescribeDhcpOptions) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeDhcpOptions") + body.Key("Action").String("DescribeCustomerGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeDhcpOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeCustomerGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18638,14 +18918,14 @@ func (m *awsEc2query_serializeOpDescribeDhcpOptions) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeEgressOnlyInternetGateways struct { +type awsEc2query_serializeOpDescribeDeclarativePoliciesReports struct { } -func 
(*awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) ID() string { +func (*awsEc2query_serializeOpDescribeDeclarativePoliciesReports) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeDeclarativePoliciesReports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18657,7 +18937,7 @@ func (m *awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeEgressOnlyInternetGatewaysInput) + input, ok := in.Parameters.(*DescribeDeclarativePoliciesReportsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -18682,10 +18962,10 @@ func (m *awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeEgressOnlyInternetGateways") + body.Key("Action").String("DescribeDeclarativePoliciesReports") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeEgressOnlyInternetGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeDeclarativePoliciesReportsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -18708,14 +18988,14 @@ func (m *awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeElasticGpus struct { +type awsEc2query_serializeOpDescribeDhcpOptions struct { } -func (*awsEc2query_serializeOpDescribeElasticGpus) ID() string { +func (*awsEc2query_serializeOpDescribeDhcpOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeElasticGpus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeDhcpOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -18727,7 +19007,147 @@ func (m *awsEc2query_serializeOpDescribeElasticGpus) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeElasticGpusInput) + input, ok := in.Parameters.(*DescribeDhcpOptionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DescribeDhcpOptions") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentDescribeDhcpOptionsInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpDescribeEgressOnlyInternetGateways struct { +} + +func (*awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpDescribeEgressOnlyInternetGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeEgressOnlyInternetGatewaysInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DescribeEgressOnlyInternetGateways") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentDescribeEgressOnlyInternetGatewaysInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + 
} + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpDescribeElasticGpus struct { +} + +func (*awsEc2query_serializeOpDescribeElasticGpus) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpDescribeElasticGpus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeElasticGpusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -19968,84 +20388,14 @@ func (m *awsEc2query_serializeOpDescribeImageAttribute) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeImages struct { -} - -func (*awsEc2query_serializeOpDescribeImages) ID() string { - return "OperationSerializer" -} - -func (m *awsEc2query_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DescribeImagesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("DescribeImages") - body.Key("Version").String("2016-11-15") - - if err := awsEc2query_serializeOpDocumentDescribeImagesInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} 
- } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsEc2query_serializeOpDescribeImportImageTasks struct { +type awsEc2query_serializeOpDescribeImageReferences struct { } -func (*awsEc2query_serializeOpDescribeImportImageTasks) ID() string { +func (*awsEc2query_serializeOpDescribeImageReferences) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeImportImageTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImageReferences) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20057,7 +20407,7 @@ func (m *awsEc2query_serializeOpDescribeImportImageTasks) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeImportImageTasksInput) + input, ok := in.Parameters.(*DescribeImageReferencesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20082,10 +20432,10 @@ func (m *awsEc2query_serializeOpDescribeImportImageTasks) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeImportImageTasks") + body.Key("Action").String("DescribeImageReferences") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeImportImageTasksInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImageReferencesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20108,14 +20458,14 @@ func (m *awsEc2query_serializeOpDescribeImportImageTasks) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeImportSnapshotTasks struct { +type awsEc2query_serializeOpDescribeImages struct { } -func (*awsEc2query_serializeOpDescribeImportSnapshotTasks) ID() string { +func (*awsEc2query_serializeOpDescribeImages) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeImportSnapshotTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImages) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20127,7 +20477,7 @@ func (m *awsEc2query_serializeOpDescribeImportSnapshotTasks) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeImportSnapshotTasksInput) + input, ok := in.Parameters.(*DescribeImagesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20152,10 +20502,10 @@ func (m 
*awsEc2query_serializeOpDescribeImportSnapshotTasks) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeImportSnapshotTasks") + body.Key("Action").String("DescribeImages") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeImportSnapshotTasksInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImagesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20178,14 +20528,14 @@ func (m *awsEc2query_serializeOpDescribeImportSnapshotTasks) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceAttribute struct { +type awsEc2query_serializeOpDescribeImageUsageReportEntries struct { } -func (*awsEc2query_serializeOpDescribeInstanceAttribute) ID() string { +func (*awsEc2query_serializeOpDescribeImageUsageReportEntries) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImageUsageReportEntries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20197,7 +20547,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceAttribute) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceAttributeInput) + input, ok := in.Parameters.(*DescribeImageUsageReportEntriesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20222,10 +20572,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceAttribute) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceAttribute") + body.Key("Action").String("DescribeImageUsageReportEntries") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImageUsageReportEntriesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20248,14 +20598,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceAttribute) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceConnectEndpoints struct { +type awsEc2query_serializeOpDescribeImageUsageReports struct { } -func (*awsEc2query_serializeOpDescribeInstanceConnectEndpoints) ID() string { +func (*awsEc2query_serializeOpDescribeImageUsageReports) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceConnectEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImageUsageReports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err 
error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20267,7 +20617,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceConnectEndpoints) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceConnectEndpointsInput) + input, ok := in.Parameters.(*DescribeImageUsageReportsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20292,10 +20642,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceConnectEndpoints) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceConnectEndpoints") + body.Key("Action").String("DescribeImageUsageReports") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceConnectEndpointsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImageUsageReportsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20318,14 +20668,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceConnectEndpoints) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceCreditSpecifications struct { +type awsEc2query_serializeOpDescribeImportImageTasks struct { } -func (*awsEc2query_serializeOpDescribeInstanceCreditSpecifications) ID() string { +func (*awsEc2query_serializeOpDescribeImportImageTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceCreditSpecifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImportImageTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20337,7 +20687,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceCreditSpecifications) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceCreditSpecificationsInput) + input, ok := in.Parameters.(*DescribeImportImageTasksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20362,10 +20712,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceCreditSpecifications) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceCreditSpecifications") + body.Key("Action").String("DescribeImportImageTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceCreditSpecificationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImportImageTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20388,14 +20738,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceCreditSpecifications) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes 
struct { +type awsEc2query_serializeOpDescribeImportSnapshotTasks struct { } -func (*awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) ID() string { +func (*awsEc2query_serializeOpDescribeImportSnapshotTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeImportSnapshotTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20407,7 +20757,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceEventNotificationAttributesInput) + input, ok := in.Parameters.(*DescribeImportSnapshotTasksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20432,10 +20782,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceEventNotificationAttributes") + body.Key("Action").String("DescribeImportSnapshotTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceEventNotificationAttributesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeImportSnapshotTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20458,14 +20808,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceEventWindows struct { +type awsEc2query_serializeOpDescribeInstanceAttribute struct { } -func (*awsEc2query_serializeOpDescribeInstanceEventWindows) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceEventWindows) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20477,7 +20827,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventWindows) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceEventWindowsInput) + input, ok := in.Parameters.(*DescribeInstanceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20502,10 +20852,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventWindows) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := 
bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceEventWindows") + body.Key("Action").String("DescribeInstanceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceEventWindowsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20528,14 +20878,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceEventWindows) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceImageMetadata struct { +type awsEc2query_serializeOpDescribeInstanceConnectEndpoints struct { } -func (*awsEc2query_serializeOpDescribeInstanceImageMetadata) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceConnectEndpoints) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceImageMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceConnectEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20547,7 +20897,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceImageMetadata) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceImageMetadataInput) + input, ok := in.Parameters.(*DescribeInstanceConnectEndpointsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20572,10 +20922,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceImageMetadata) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceImageMetadata") + body.Key("Action").String("DescribeInstanceConnectEndpoints") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceImageMetadataInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceConnectEndpointsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20598,14 +20948,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceImageMetadata) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstances struct { +type awsEc2query_serializeOpDescribeInstanceCreditSpecifications struct { } -func (*awsEc2query_serializeOpDescribeInstances) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceCreditSpecifications) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceCreditSpecifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20617,7 +20967,7 @@ func (m 
*awsEc2query_serializeOpDescribeInstances) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstancesInput) + input, ok := in.Parameters.(*DescribeInstanceCreditSpecificationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20642,10 +20992,10 @@ func (m *awsEc2query_serializeOpDescribeInstances) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstances") + body.Key("Action").String("DescribeInstanceCreditSpecifications") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceCreditSpecificationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20668,14 +21018,14 @@ func (m *awsEc2query_serializeOpDescribeInstances) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceStatus struct { +type awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes struct { } -func (*awsEc2query_serializeOpDescribeInstanceStatus) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceEventNotificationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20687,7 +21037,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceStatus) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceStatusInput) + input, ok := in.Parameters.(*DescribeInstanceEventNotificationAttributesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20712,10 +21062,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceStatus) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceStatus") + body.Key("Action").String("DescribeInstanceEventNotificationAttributes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceEventNotificationAttributesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20738,14 +21088,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceStatus) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceTopology struct { +type awsEc2query_serializeOpDescribeInstanceEventWindows struct { } -func 
(*awsEc2query_serializeOpDescribeInstanceTopology) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceEventWindows) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceTopology) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceEventWindows) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20757,7 +21107,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceTopology) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceTopologyInput) + input, ok := in.Parameters.(*DescribeInstanceEventWindowsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20782,10 +21132,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceTopology) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceTopology") + body.Key("Action").String("DescribeInstanceEventWindows") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceTopologyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceEventWindowsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20808,14 +21158,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceTopology) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceTypeOfferings struct { +type awsEc2query_serializeOpDescribeInstanceImageMetadata struct { } -func (*awsEc2query_serializeOpDescribeInstanceTypeOfferings) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceImageMetadata) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceTypeOfferings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceImageMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20827,7 +21177,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypeOfferings) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceTypeOfferingsInput) + input, ok := in.Parameters.(*DescribeInstanceImageMetadataInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20852,10 +21202,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypeOfferings) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceTypeOfferings") + body.Key("Action").String("DescribeInstanceImageMetadata") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceTypeOfferingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceImageMetadataInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20878,14 +21228,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypeOfferings) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInstanceTypes struct { +type awsEc2query_serializeOpDescribeInstances struct { } -func (*awsEc2query_serializeOpDescribeInstanceTypes) ID() string { +func (*awsEc2query_serializeOpDescribeInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInstanceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20897,7 +21247,7 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypes) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInstanceTypesInput) + input, ok := in.Parameters.(*DescribeInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20922,10 +21272,10 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypes) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInstanceTypes") + body.Key("Action").String("DescribeInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInstanceTypesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -20948,14 +21298,14 @@ func (m *awsEc2query_serializeOpDescribeInstanceTypes) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeInternetGateways struct { +type awsEc2query_serializeOpDescribeInstanceStatus struct { } -func (*awsEc2query_serializeOpDescribeInternetGateways) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeInternetGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -20967,7 +21317,7 @@ func (m *awsEc2query_serializeOpDescribeInternetGateways) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeInternetGatewaysInput) + input, ok := in.Parameters.(*DescribeInstanceStatusInput) _ = 
input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -20992,10 +21342,10 @@ func (m *awsEc2query_serializeOpDescribeInternetGateways) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeInternetGateways") + body.Key("Action").String("DescribeInstanceStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeInternetGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21018,14 +21368,14 @@ func (m *awsEc2query_serializeOpDescribeInternetGateways) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamByoasn struct { +type awsEc2query_serializeOpDescribeInstanceTopology struct { } -func (*awsEc2query_serializeOpDescribeIpamByoasn) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceTopology) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamByoasn) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceTopology) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21037,7 +21387,7 @@ func (m *awsEc2query_serializeOpDescribeIpamByoasn) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamByoasnInput) + input, ok := in.Parameters.(*DescribeInstanceTopologyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21062,10 +21412,10 @@ func (m *awsEc2query_serializeOpDescribeIpamByoasn) HandleSerialize(ctx context. bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamByoasn") + body.Key("Action").String("DescribeInstanceTopology") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamByoasnInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceTopologyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21088,14 +21438,14 @@ func (m *awsEc2query_serializeOpDescribeIpamByoasn) HandleSerialize(ctx context. 
return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens struct { +type awsEc2query_serializeOpDescribeInstanceTypeOfferings struct { } -func (*awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceTypeOfferings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceTypeOfferings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21107,7 +21457,7 @@ func (m *awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamExternalResourceVerificationTokensInput) + input, ok := in.Parameters.(*DescribeInstanceTypeOfferingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21132,10 +21482,10 @@ func (m *awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamExternalResourceVerificationTokens") + body.Key("Action").String("DescribeInstanceTypeOfferings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamExternalResourceVerificationTokensInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceTypeOfferingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21158,14 +21508,14 @@ func (m *awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamPools struct { +type awsEc2query_serializeOpDescribeInstanceTypes struct { } -func (*awsEc2query_serializeOpDescribeIpamPools) ID() string { +func (*awsEc2query_serializeOpDescribeInstanceTypes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamPools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInstanceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21177,7 +21527,7 @@ func (m *awsEc2query_serializeOpDescribeIpamPools) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamPoolsInput) + input, ok := in.Parameters.(*DescribeInstanceTypesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21202,10 +21552,10 @@ func (m *awsEc2query_serializeOpDescribeIpamPools) HandleSerialize(ctx context.C bodyWriter := 
bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamPools") + body.Key("Action").String("DescribeInstanceTypes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamPoolsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInstanceTypesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21228,14 +21578,14 @@ func (m *awsEc2query_serializeOpDescribeIpamPools) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamResourceDiscoveries struct { +type awsEc2query_serializeOpDescribeInternetGateways struct { } -func (*awsEc2query_serializeOpDescribeIpamResourceDiscoveries) ID() string { +func (*awsEc2query_serializeOpDescribeInternetGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeInternetGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21247,7 +21597,7 @@ func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveries) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamResourceDiscoveriesInput) + input, ok := in.Parameters.(*DescribeInternetGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21272,10 +21622,10 @@ func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveries) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamResourceDiscoveries") + body.Key("Action").String("DescribeInternetGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamResourceDiscoveriesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeInternetGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21298,14 +21648,14 @@ func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveries) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations struct { +type awsEc2query_serializeOpDescribeIpamByoasn struct { } -func (*awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeIpamByoasn) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamByoasn) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21317,7 +21667,7 @@ 
func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) Handl return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamResourceDiscoveryAssociationsInput) + input, ok := in.Parameters.(*DescribeIpamByoasnInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21342,10 +21692,10 @@ func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) Handl bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamResourceDiscoveryAssociations") + body.Key("Action").String("DescribeIpamByoasn") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamResourceDiscoveryAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamByoasnInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21368,14 +21718,14 @@ func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) Handl return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpams struct { +type awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens struct { } -func (*awsEc2query_serializeOpDescribeIpams) ID() string { +func (*awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpams) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamExternalResourceVerificationTokens) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21387,7 +21737,7 @@ func (m *awsEc2query_serializeOpDescribeIpams) HandleSerialize(ctx context.Conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamsInput) + input, ok := in.Parameters.(*DescribeIpamExternalResourceVerificationTokensInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21412,10 +21762,10 @@ func (m *awsEc2query_serializeOpDescribeIpams) HandleSerialize(ctx context.Conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpams") + body.Key("Action").String("DescribeIpamExternalResourceVerificationTokens") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamExternalResourceVerificationTokensInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21438,14 +21788,14 @@ func (m *awsEc2query_serializeOpDescribeIpams) HandleSerialize(ctx context.Conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpamScopes struct { +type awsEc2query_serializeOpDescribeIpamPools struct { } -func (*awsEc2query_serializeOpDescribeIpamScopes) ID() 
string { +func (*awsEc2query_serializeOpDescribeIpamPools) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpamScopes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamPools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21457,7 +21807,7 @@ func (m *awsEc2query_serializeOpDescribeIpamScopes) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpamScopesInput) + input, ok := in.Parameters.(*DescribeIpamPoolsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21482,10 +21832,10 @@ func (m *awsEc2query_serializeOpDescribeIpamScopes) HandleSerialize(ctx context. bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpamScopes") + body.Key("Action").String("DescribeIpamPools") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpamScopesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamPoolsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21508,14 +21858,14 @@ func (m *awsEc2query_serializeOpDescribeIpamScopes) HandleSerialize(ctx context. return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeIpv6Pools struct { +type awsEc2query_serializeOpDescribeIpamResourceDiscoveries struct { } -func (*awsEc2query_serializeOpDescribeIpv6Pools) ID() string { +func (*awsEc2query_serializeOpDescribeIpamResourceDiscoveries) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeIpv6Pools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21527,7 +21877,7 @@ func (m *awsEc2query_serializeOpDescribeIpv6Pools) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeIpv6PoolsInput) + input, ok := in.Parameters.(*DescribeIpamResourceDiscoveriesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21552,10 +21902,10 @@ func (m *awsEc2query_serializeOpDescribeIpv6Pools) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeIpv6Pools") + body.Key("Action").String("DescribeIpamResourceDiscoveries") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeIpv6PoolsInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDescribeIpamResourceDiscoveriesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21578,14 +21928,14 @@ func (m *awsEc2query_serializeOpDescribeIpv6Pools) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeKeyPairs struct { +type awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations struct { } -func (*awsEc2query_serializeOpDescribeKeyPairs) ID() string { +func (*awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeKeyPairs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamResourceDiscoveryAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21597,7 +21947,7 @@ func (m *awsEc2query_serializeOpDescribeKeyPairs) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeKeyPairsInput) + input, ok := in.Parameters.(*DescribeIpamResourceDiscoveryAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21622,10 +21972,10 @@ func (m *awsEc2query_serializeOpDescribeKeyPairs) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeKeyPairs") + body.Key("Action").String("DescribeIpamResourceDiscoveryAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeKeyPairsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamResourceDiscoveryAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21648,14 +21998,14 @@ func (m *awsEc2query_serializeOpDescribeKeyPairs) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLaunchTemplates struct { +type awsEc2query_serializeOpDescribeIpams struct { } -func (*awsEc2query_serializeOpDescribeLaunchTemplates) ID() string { +func (*awsEc2query_serializeOpDescribeIpams) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLaunchTemplates) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpams) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21667,7 +22017,7 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplates) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLaunchTemplatesInput) + input, ok := in.Parameters.(*DescribeIpamsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} @@ -21692,10 +22042,10 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplates) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLaunchTemplates") + body.Key("Action").String("DescribeIpams") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLaunchTemplatesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21718,14 +22068,14 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplates) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLaunchTemplateVersions struct { +type awsEc2query_serializeOpDescribeIpamScopes struct { } -func (*awsEc2query_serializeOpDescribeLaunchTemplateVersions) ID() string { +func (*awsEc2query_serializeOpDescribeIpamScopes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLaunchTemplateVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpamScopes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21737,7 +22087,7 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplateVersions) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLaunchTemplateVersionsInput) + input, ok := in.Parameters.(*DescribeIpamScopesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21762,10 +22112,10 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplateVersions) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLaunchTemplateVersions") + body.Key("Action").String("DescribeIpamScopes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLaunchTemplateVersionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpamScopesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21788,14 +22138,14 @@ func (m *awsEc2query_serializeOpDescribeLaunchTemplateVersions) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLocalGatewayRouteTables struct { +type awsEc2query_serializeOpDescribeIpv6Pools struct { } -func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTables) ID() string { +func (*awsEc2query_serializeOpDescribeIpv6Pools) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeIpv6Pools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, 
span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21807,7 +22157,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTables) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewayRouteTablesInput) + input, ok := in.Parameters.(*DescribeIpv6PoolsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21832,10 +22182,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTables) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGatewayRouteTables") + body.Key("Action").String("DescribeIpv6Pools") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTablesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeIpv6PoolsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21858,14 +22208,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTables) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations struct { +type awsEc2query_serializeOpDescribeKeyPairs struct { } -func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeKeyPairs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeKeyPairs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21877,7 +22227,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGr return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) + input, ok := in.Parameters.(*DescribeKeyPairsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21902,10 +22252,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGr bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations") + body.Key("Action").String("DescribeKeyPairs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeKeyPairsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21928,14 +22278,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGr return next.HandleSerialize(ctx, in) } -type 
awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations struct { +type awsEc2query_serializeOpDescribeLaunchTemplates struct { } -func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeLaunchTemplates) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLaunchTemplates) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -21947,7 +22297,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) H return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewayRouteTableVpcAssociationsInput) + input, ok := in.Parameters.(*DescribeLaunchTemplatesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -21972,10 +22322,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) H bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGatewayRouteTableVpcAssociations") + body.Key("Action").String("DescribeLaunchTemplates") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLaunchTemplatesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -21998,14 +22348,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) H return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLocalGateways struct { +type awsEc2query_serializeOpDescribeLaunchTemplateVersions struct { } -func (*awsEc2query_serializeOpDescribeLocalGateways) ID() string { +func (*awsEc2query_serializeOpDescribeLaunchTemplateVersions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLaunchTemplateVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22017,7 +22367,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGateways) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewaysInput) + input, ok := in.Parameters.(*DescribeLaunchTemplateVersionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22042,10 +22392,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGateways) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGateways") + body.Key("Action").String("DescribeLaunchTemplateVersions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLaunchTemplateVersionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22068,14 +22418,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGateways) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups struct { +type awsEc2query_serializeOpDescribeLocalGatewayRouteTables struct { } -func (*awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTables) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22087,7 +22437,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewayVirtualInterfaceGroupsInput) + input, ok := in.Parameters.(*DescribeLocalGatewayRouteTablesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22112,10 +22462,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) Hand bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGatewayVirtualInterfaceGroups") + body.Key("Action").String("DescribeLocalGatewayRouteTables") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTablesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22138,14 +22488,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) Hand return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces struct { +type awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations struct { } -func (*awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22157,7 +22507,7 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLocalGatewayVirtualInterfacesInput) + input, ok := in.Parameters.(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22182,10 +22532,10 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLocalGatewayVirtualInterfaces") + body.Key("Action").String("DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayVirtualInterfacesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22208,14 +22558,14 @@ func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeLockedSnapshots struct { +type awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations struct { } -func (*awsEc2query_serializeOpDescribeLockedSnapshots) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGatewayRouteTableVpcAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22227,7 +22577,7 @@ func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeLockedSnapshotsInput) + input, ok := in.Parameters.(*DescribeLocalGatewayRouteTableVpcAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22252,10 +22602,10 @@ func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeLockedSnapshots") + body.Key("Action").String("DescribeLocalGatewayRouteTableVpcAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeLockedSnapshotsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayRouteTableVpcAssociationsInput(input, 
bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22278,14 +22628,14 @@ func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeMacHosts struct { +type awsEc2query_serializeOpDescribeLocalGateways struct { } -func (*awsEc2query_serializeOpDescribeMacHosts) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22297,7 +22647,7 @@ func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeMacHostsInput) + input, ok := in.Parameters.(*DescribeLocalGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22322,10 +22672,10 @@ func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeMacHosts") + body.Key("Action").String("DescribeLocalGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeMacHostsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22348,14 +22698,14 @@ func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeManagedPrefixLists struct { +type awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups struct { } -func (*awsEc2query_serializeOpDescribeManagedPrefixLists) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeManagedPrefixLists) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaceGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22367,7 +22717,7 @@ func (m *awsEc2query_serializeOpDescribeManagedPrefixLists) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeManagedPrefixListsInput) + input, ok := in.Parameters.(*DescribeLocalGatewayVirtualInterfaceGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22392,10 +22742,10 @@ func (m 
*awsEc2query_serializeOpDescribeManagedPrefixLists) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeManagedPrefixLists") + body.Key("Action").String("DescribeLocalGatewayVirtualInterfaceGroups") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeManagedPrefixListsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayVirtualInterfaceGroupsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22418,14 +22768,14 @@ func (m *awsEc2query_serializeOpDescribeManagedPrefixLists) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeMovingAddresses struct { +type awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces struct { } -func (*awsEc2query_serializeOpDescribeMovingAddresses) ID() string { +func (*awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeMovingAddresses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLocalGatewayVirtualInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22437,7 +22787,7 @@ func (m *awsEc2query_serializeOpDescribeMovingAddresses) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeMovingAddressesInput) + input, ok := in.Parameters.(*DescribeLocalGatewayVirtualInterfacesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22462,10 +22812,10 @@ func (m *awsEc2query_serializeOpDescribeMovingAddresses) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeMovingAddresses") + body.Key("Action").String("DescribeLocalGatewayVirtualInterfaces") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeMovingAddressesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLocalGatewayVirtualInterfacesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22488,14 +22838,14 @@ func (m *awsEc2query_serializeOpDescribeMovingAddresses) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNatGateways struct { +type awsEc2query_serializeOpDescribeLockedSnapshots struct { } -func (*awsEc2query_serializeOpDescribeNatGateways) ID() string { +func (*awsEc2query_serializeOpDescribeLockedSnapshots) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNatGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeLockedSnapshots) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22507,7 +22857,7 @@ func (m *awsEc2query_serializeOpDescribeNatGateways) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNatGatewaysInput) + input, ok := in.Parameters.(*DescribeLockedSnapshotsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22532,10 +22882,10 @@ func (m *awsEc2query_serializeOpDescribeNatGateways) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNatGateways") + body.Key("Action").String("DescribeLockedSnapshots") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNatGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeLockedSnapshotsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22558,14 +22908,14 @@ func (m *awsEc2query_serializeOpDescribeNatGateways) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkAcls struct { +type awsEc2query_serializeOpDescribeMacHosts struct { } -func (*awsEc2query_serializeOpDescribeNetworkAcls) ID() string { +func (*awsEc2query_serializeOpDescribeMacHosts) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkAcls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeMacHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22577,7 +22927,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkAcls) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkAclsInput) + input, ok := in.Parameters.(*DescribeMacHostsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22602,10 +22952,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkAcls) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkAcls") + body.Key("Action").String("DescribeMacHosts") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkAclsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeMacHostsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22628,14 +22978,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkAcls) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses struct { +type awsEc2query_serializeOpDescribeMacModificationTasks struct { } -func (*awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) ID() string { 
+func (*awsEc2query_serializeOpDescribeMacModificationTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeMacModificationTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22647,7 +22997,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInsightsAccessScopeAnalysesInput) + input, ok := in.Parameters.(*DescribeMacModificationTasksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22672,10 +23022,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) Hand bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInsightsAccessScopeAnalyses") + body.Key("Action").String("DescribeMacModificationTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsAccessScopeAnalysesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeMacModificationTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22698,14 +23048,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) Hand return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes struct { +type awsEc2query_serializeOpDescribeManagedPrefixLists struct { } -func (*awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) ID() string { +func (*awsEc2query_serializeOpDescribeManagedPrefixLists) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeManagedPrefixLists) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22717,7 +23067,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInsightsAccessScopesInput) + input, ok := in.Parameters.(*DescribeManagedPrefixListsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22742,10 +23092,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInsightsAccessScopes") + body.Key("Action").String("DescribeManagedPrefixLists") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsAccessScopesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeManagedPrefixListsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22768,14 +23118,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInsightsAnalyses struct { +type awsEc2query_serializeOpDescribeMovingAddresses struct { } -func (*awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) ID() string { +func (*awsEc2query_serializeOpDescribeMovingAddresses) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeMovingAddresses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22787,7 +23137,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInsightsAnalysesInput) + input, ok := in.Parameters.(*DescribeMovingAddressesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22812,10 +23162,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInsightsAnalyses") + body.Key("Action").String("DescribeMovingAddresses") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsAnalysesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeMovingAddressesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22838,14 +23188,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInsightsPaths struct { +type awsEc2query_serializeOpDescribeNatGateways struct { } -func (*awsEc2query_serializeOpDescribeNetworkInsightsPaths) ID() string { +func (*awsEc2query_serializeOpDescribeNatGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInsightsPaths) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNatGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22857,7 +23207,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsPaths) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*DescribeNetworkInsightsPathsInput) + input, ok := in.Parameters.(*DescribeNatGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22882,10 +23232,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsPaths) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInsightsPaths") + body.Key("Action").String("DescribeNatGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsPathsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNatGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22908,14 +23258,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInsightsPaths) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInterfaceAttribute struct { +type awsEc2query_serializeOpDescribeNetworkAcls struct { } -func (*awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkAcls) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkAcls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22927,7 +23277,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInterfaceAttributeInput) + input, ok := in.Parameters.(*DescribeNetworkAclsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -22952,10 +23302,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInterfaceAttribute") + body.Key("Action").String("DescribeNetworkAcls") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkAclsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -22978,14 +23328,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInterfacePermissions struct { +type awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses struct { } -func (*awsEc2query_serializeOpDescribeNetworkInterfacePermissions) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInterfacePermissions) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopeAnalyses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -22997,7 +23347,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfacePermissions) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInterfacePermissionsInput) + input, ok := in.Parameters.(*DescribeNetworkInsightsAccessScopeAnalysesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23022,10 +23372,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfacePermissions) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInterfacePermissions") + body.Key("Action").String("DescribeNetworkInsightsAccessScopeAnalyses") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfacePermissionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsAccessScopeAnalysesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23048,14 +23398,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfacePermissions) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeNetworkInterfaces struct { +type awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes struct { } -func (*awsEc2query_serializeOpDescribeNetworkInterfaces) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeNetworkInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInsightsAccessScopes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23067,7 +23417,7 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaces) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeNetworkInterfacesInput) + input, ok := in.Parameters.(*DescribeNetworkInsightsAccessScopesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23092,10 +23442,10 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaces) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeNetworkInterfaces") + body.Key("Action").String("DescribeNetworkInsightsAccessScopes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfacesInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDescribeNetworkInsightsAccessScopesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23118,14 +23468,14 @@ func (m *awsEc2query_serializeOpDescribeNetworkInterfaces) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeOutpostLags struct { +type awsEc2query_serializeOpDescribeNetworkInsightsAnalyses struct { } -func (*awsEc2query_serializeOpDescribeOutpostLags) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeOutpostLags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInsightsAnalyses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23137,7 +23487,7 @@ func (m *awsEc2query_serializeOpDescribeOutpostLags) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeOutpostLagsInput) + input, ok := in.Parameters.(*DescribeNetworkInsightsAnalysesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23162,10 +23512,10 @@ func (m *awsEc2query_serializeOpDescribeOutpostLags) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeOutpostLags") + body.Key("Action").String("DescribeNetworkInsightsAnalyses") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeOutpostLagsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsAnalysesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23188,14 +23538,14 @@ func (m *awsEc2query_serializeOpDescribeOutpostLags) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribePlacementGroups struct { +type awsEc2query_serializeOpDescribeNetworkInsightsPaths struct { } -func (*awsEc2query_serializeOpDescribePlacementGroups) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInsightsPaths) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribePlacementGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInsightsPaths) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23207,7 +23557,7 @@ func (m *awsEc2query_serializeOpDescribePlacementGroups) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribePlacementGroupsInput) + input, ok := in.Parameters.(*DescribeNetworkInsightsPathsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23232,10 +23582,10 @@ func (m *awsEc2query_serializeOpDescribePlacementGroups) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribePlacementGroups") + body.Key("Action").String("DescribeNetworkInsightsPaths") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribePlacementGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInsightsPathsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23258,14 +23608,14 @@ func (m *awsEc2query_serializeOpDescribePlacementGroups) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribePrefixLists struct { +type awsEc2query_serializeOpDescribeNetworkInterfaceAttribute struct { } -func (*awsEc2query_serializeOpDescribePrefixLists) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribePrefixLists) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23277,7 +23627,7 @@ func (m *awsEc2query_serializeOpDescribePrefixLists) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribePrefixListsInput) + input, ok := in.Parameters.(*DescribeNetworkInterfaceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23302,10 +23652,10 @@ func (m *awsEc2query_serializeOpDescribePrefixLists) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribePrefixLists") + body.Key("Action").String("DescribeNetworkInterfaceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribePrefixListsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23328,14 +23678,14 @@ func (m *awsEc2query_serializeOpDescribePrefixLists) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribePrincipalIdFormat struct { +type awsEc2query_serializeOpDescribeNetworkInterfacePermissions struct { } -func (*awsEc2query_serializeOpDescribePrincipalIdFormat) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInterfacePermissions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribePrincipalIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInterfacePermissions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, 
next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23347,7 +23697,7 @@ func (m *awsEc2query_serializeOpDescribePrincipalIdFormat) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribePrincipalIdFormatInput) + input, ok := in.Parameters.(*DescribeNetworkInterfacePermissionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23372,10 +23722,10 @@ func (m *awsEc2query_serializeOpDescribePrincipalIdFormat) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribePrincipalIdFormat") + body.Key("Action").String("DescribeNetworkInterfacePermissions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribePrincipalIdFormatInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfacePermissionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23398,14 +23748,14 @@ func (m *awsEc2query_serializeOpDescribePrincipalIdFormat) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribePublicIpv4Pools struct { +type awsEc2query_serializeOpDescribeNetworkInterfaces struct { } -func (*awsEc2query_serializeOpDescribePublicIpv4Pools) ID() string { +func (*awsEc2query_serializeOpDescribeNetworkInterfaces) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribePublicIpv4Pools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeNetworkInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23417,7 +23767,7 @@ func (m *awsEc2query_serializeOpDescribePublicIpv4Pools) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribePublicIpv4PoolsInput) + input, ok := in.Parameters.(*DescribeNetworkInterfacesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23442,10 +23792,10 @@ func (m *awsEc2query_serializeOpDescribePublicIpv4Pools) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribePublicIpv4Pools") + body.Key("Action").String("DescribeNetworkInterfaces") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribePublicIpv4PoolsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeNetworkInterfacesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23468,14 +23818,14 @@ func (m *awsEc2query_serializeOpDescribePublicIpv4Pools) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type 
awsEc2query_serializeOpDescribeRegions struct { +type awsEc2query_serializeOpDescribeOutpostLags struct { } -func (*awsEc2query_serializeOpDescribeRegions) ID() string { +func (*awsEc2query_serializeOpDescribeOutpostLags) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeRegions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeOutpostLags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23487,7 +23837,7 @@ func (m *awsEc2query_serializeOpDescribeRegions) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeRegionsInput) + input, ok := in.Parameters.(*DescribeOutpostLagsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23512,10 +23862,10 @@ func (m *awsEc2query_serializeOpDescribeRegions) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeRegions") + body.Key("Action").String("DescribeOutpostLags") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeRegionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeOutpostLagsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23538,14 +23888,14 @@ func (m *awsEc2query_serializeOpDescribeRegions) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeReplaceRootVolumeTasks struct { +type awsEc2query_serializeOpDescribePlacementGroups struct { } -func (*awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) ID() string { +func (*awsEc2query_serializeOpDescribePlacementGroups) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribePlacementGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23557,7 +23907,7 @@ func (m *awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeReplaceRootVolumeTasksInput) + input, ok := in.Parameters.(*DescribePlacementGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23582,10 +23932,10 @@ func (m *awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeReplaceRootVolumeTasks") + body.Key("Action").String("DescribePlacementGroups") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeReplaceRootVolumeTasksInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribePlacementGroupsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23608,14 +23958,14 @@ func (m *awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeReservedInstances struct { +type awsEc2query_serializeOpDescribePrefixLists struct { } -func (*awsEc2query_serializeOpDescribeReservedInstances) ID() string { +func (*awsEc2query_serializeOpDescribePrefixLists) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeReservedInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribePrefixLists) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23627,7 +23977,7 @@ func (m *awsEc2query_serializeOpDescribeReservedInstances) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeReservedInstancesInput) + input, ok := in.Parameters.(*DescribePrefixListsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23652,10 +24002,10 @@ func (m *awsEc2query_serializeOpDescribeReservedInstances) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeReservedInstances") + body.Key("Action").String("DescribePrefixLists") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribePrefixListsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23678,14 +24028,14 @@ func (m *awsEc2query_serializeOpDescribeReservedInstances) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeReservedInstancesListings struct { +type awsEc2query_serializeOpDescribePrincipalIdFormat struct { } -func (*awsEc2query_serializeOpDescribeReservedInstancesListings) ID() string { +func (*awsEc2query_serializeOpDescribePrincipalIdFormat) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeReservedInstancesListings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribePrincipalIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23697,7 +24047,7 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesListings) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*DescribeReservedInstancesListingsInput) + input, ok := in.Parameters.(*DescribePrincipalIdFormatInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23722,10 +24072,10 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesListings) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeReservedInstancesListings") + body.Key("Action").String("DescribePrincipalIdFormat") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesListingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribePrincipalIdFormatInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23748,14 +24098,14 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesListings) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeReservedInstancesModifications struct { +type awsEc2query_serializeOpDescribePublicIpv4Pools struct { } -func (*awsEc2query_serializeOpDescribeReservedInstancesModifications) ID() string { +func (*awsEc2query_serializeOpDescribePublicIpv4Pools) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeReservedInstancesModifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribePublicIpv4Pools) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23767,7 +24117,7 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesModifications) HandleSe return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeReservedInstancesModificationsInput) + input, ok := in.Parameters.(*DescribePublicIpv4PoolsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23792,10 +24142,10 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesModifications) HandleSe bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeReservedInstancesModifications") + body.Key("Action").String("DescribePublicIpv4Pools") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesModificationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribePublicIpv4PoolsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23818,14 +24168,14 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesModifications) HandleSe return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeReservedInstancesOfferings struct { +type awsEc2query_serializeOpDescribeRegions struct { } -func (*awsEc2query_serializeOpDescribeReservedInstancesOfferings) ID() string { +func (*awsEc2query_serializeOpDescribeRegions) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpDescribeReservedInstancesOfferings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeRegions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23837,7 +24187,7 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesOfferings) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeReservedInstancesOfferingsInput) + input, ok := in.Parameters.(*DescribeRegionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23862,10 +24212,10 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesOfferings) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeReservedInstancesOfferings") + body.Key("Action").String("DescribeRegions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesOfferingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeRegionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23888,14 +24238,14 @@ func (m *awsEc2query_serializeOpDescribeReservedInstancesOfferings) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeRouteServerEndpoints struct { +type awsEc2query_serializeOpDescribeReplaceRootVolumeTasks struct { } -func (*awsEc2query_serializeOpDescribeRouteServerEndpoints) ID() string { +func (*awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeRouteServerEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeReplaceRootVolumeTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23907,7 +24257,7 @@ func (m *awsEc2query_serializeOpDescribeRouteServerEndpoints) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeRouteServerEndpointsInput) + input, ok := in.Parameters.(*DescribeReplaceRootVolumeTasksInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -23932,10 +24282,10 @@ func (m *awsEc2query_serializeOpDescribeRouteServerEndpoints) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeRouteServerEndpoints") + body.Key("Action").String("DescribeReplaceRootVolumeTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeRouteServerEndpointsInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDescribeReplaceRootVolumeTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -23958,14 +24308,14 @@ func (m *awsEc2query_serializeOpDescribeRouteServerEndpoints) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeRouteServerPeers struct { +type awsEc2query_serializeOpDescribeReservedInstances struct { } -func (*awsEc2query_serializeOpDescribeRouteServerPeers) ID() string { +func (*awsEc2query_serializeOpDescribeReservedInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeRouteServerPeers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeReservedInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -23977,7 +24327,7 @@ func (m *awsEc2query_serializeOpDescribeRouteServerPeers) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeRouteServerPeersInput) + input, ok := in.Parameters.(*DescribeReservedInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24002,10 +24352,10 @@ func (m *awsEc2query_serializeOpDescribeRouteServerPeers) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeRouteServerPeers") + body.Key("Action").String("DescribeReservedInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeRouteServerPeersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24028,14 +24378,14 @@ func (m *awsEc2query_serializeOpDescribeRouteServerPeers) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeRouteServers struct { +type awsEc2query_serializeOpDescribeReservedInstancesListings struct { } -func (*awsEc2query_serializeOpDescribeRouteServers) ID() string { +func (*awsEc2query_serializeOpDescribeReservedInstancesListings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeRouteServers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeReservedInstancesListings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24047,7 +24397,7 @@ func (m *awsEc2query_serializeOpDescribeRouteServers) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeRouteServersInput) + input, ok := in.Parameters.(*DescribeReservedInstancesListingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24072,10 +24422,10 @@ func (m *awsEc2query_serializeOpDescribeRouteServers) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeRouteServers") + body.Key("Action").String("DescribeReservedInstancesListings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeRouteServersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesListingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24098,14 +24448,14 @@ func (m *awsEc2query_serializeOpDescribeRouteServers) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeRouteTables struct { +type awsEc2query_serializeOpDescribeReservedInstancesModifications struct { } -func (*awsEc2query_serializeOpDescribeRouteTables) ID() string { +func (*awsEc2query_serializeOpDescribeReservedInstancesModifications) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeReservedInstancesModifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24117,7 +24467,7 @@ func (m *awsEc2query_serializeOpDescribeRouteTables) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeRouteTablesInput) + input, ok := in.Parameters.(*DescribeReservedInstancesModificationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24142,10 +24492,10 @@ func (m *awsEc2query_serializeOpDescribeRouteTables) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeRouteTables") + body.Key("Action").String("DescribeReservedInstancesModifications") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeRouteTablesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesModificationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24168,14 +24518,14 @@ func (m *awsEc2query_serializeOpDescribeRouteTables) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeScheduledInstanceAvailability struct { +type awsEc2query_serializeOpDescribeReservedInstancesOfferings struct { } -func (*awsEc2query_serializeOpDescribeScheduledInstanceAvailability) ID() string { +func (*awsEc2query_serializeOpDescribeReservedInstancesOfferings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeScheduledInstanceAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeReservedInstancesOfferings) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24187,7 +24537,7 @@ func (m *awsEc2query_serializeOpDescribeScheduledInstanceAvailability) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeScheduledInstanceAvailabilityInput) + input, ok := in.Parameters.(*DescribeReservedInstancesOfferingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24212,10 +24562,10 @@ func (m *awsEc2query_serializeOpDescribeScheduledInstanceAvailability) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeScheduledInstanceAvailability") + body.Key("Action").String("DescribeReservedInstancesOfferings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeScheduledInstanceAvailabilityInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeReservedInstancesOfferingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24238,14 +24588,14 @@ func (m *awsEc2query_serializeOpDescribeScheduledInstanceAvailability) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeScheduledInstances struct { +type awsEc2query_serializeOpDescribeRouteServerEndpoints struct { } -func (*awsEc2query_serializeOpDescribeScheduledInstances) ID() string { +func (*awsEc2query_serializeOpDescribeRouteServerEndpoints) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeRouteServerEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24257,7 +24607,7 @@ func (m *awsEc2query_serializeOpDescribeScheduledInstances) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeScheduledInstancesInput) + input, ok := in.Parameters.(*DescribeRouteServerEndpointsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24282,10 +24632,10 @@ func (m *awsEc2query_serializeOpDescribeScheduledInstances) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeScheduledInstances") + body.Key("Action").String("DescribeRouteServerEndpoints") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeScheduledInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeRouteServerEndpointsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24308,14 +24658,14 @@ func (m 
*awsEc2query_serializeOpDescribeScheduledInstances) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSecurityGroupReferences struct { +type awsEc2query_serializeOpDescribeRouteServerPeers struct { } -func (*awsEc2query_serializeOpDescribeSecurityGroupReferences) ID() string { +func (*awsEc2query_serializeOpDescribeRouteServerPeers) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSecurityGroupReferences) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeRouteServerPeers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24327,7 +24677,7 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupReferences) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSecurityGroupReferencesInput) + input, ok := in.Parameters.(*DescribeRouteServerPeersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24352,10 +24702,10 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupReferences) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSecurityGroupReferences") + body.Key("Action").String("DescribeRouteServerPeers") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupReferencesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeRouteServerPeersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24378,14 +24728,14 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupReferences) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSecurityGroupRules struct { +type awsEc2query_serializeOpDescribeRouteServers struct { } -func (*awsEc2query_serializeOpDescribeSecurityGroupRules) ID() string { +func (*awsEc2query_serializeOpDescribeRouteServers) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSecurityGroupRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeRouteServers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24397,7 +24747,7 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupRules) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSecurityGroupRulesInput) + input, ok := in.Parameters.(*DescribeRouteServersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24422,10 +24772,10 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupRules) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) 
bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSecurityGroupRules") + body.Key("Action").String("DescribeRouteServers") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupRulesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeRouteServersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24448,14 +24798,14 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupRules) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSecurityGroups struct { +type awsEc2query_serializeOpDescribeRouteTables struct { } -func (*awsEc2query_serializeOpDescribeSecurityGroups) ID() string { +func (*awsEc2query_serializeOpDescribeRouteTables) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSecurityGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24467,7 +24817,7 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroups) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSecurityGroupsInput) + input, ok := in.Parameters.(*DescribeRouteTablesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24492,10 +24842,10 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroups) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSecurityGroups") + body.Key("Action").String("DescribeRouteTables") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeRouteTablesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24518,14 +24868,14 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroups) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations struct { +type awsEc2query_serializeOpDescribeScheduledInstanceAvailability struct { } -func (*awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeScheduledInstanceAvailability) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeScheduledInstanceAvailability) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24537,7 +24887,7 @@ func (m 
*awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSecurityGroupVpcAssociationsInput) + input, ok := in.Parameters.(*DescribeScheduledInstanceAvailabilityInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24562,10 +24912,10 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSecurityGroupVpcAssociations") + body.Key("Action").String("DescribeScheduledInstanceAvailability") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupVpcAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeScheduledInstanceAvailabilityInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24588,14 +24938,14 @@ func (m *awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces struct { +type awsEc2query_serializeOpDescribeScheduledInstances struct { } -func (*awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) ID() string { +func (*awsEc2query_serializeOpDescribeScheduledInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24607,7 +24957,7 @@ func (m *awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeServiceLinkVirtualInterfacesInput) + input, ok := in.Parameters.(*DescribeScheduledInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24632,10 +24982,10 @@ func (m *awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeServiceLinkVirtualInterfaces") + body.Key("Action").String("DescribeScheduledInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeServiceLinkVirtualInterfacesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeScheduledInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24658,14 +25008,14 @@ func (m *awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSnapshotAttribute struct { +type 
awsEc2query_serializeOpDescribeSecurityGroupReferences struct { } -func (*awsEc2query_serializeOpDescribeSnapshotAttribute) ID() string { +func (*awsEc2query_serializeOpDescribeSecurityGroupReferences) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSecurityGroupReferences) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24677,7 +25027,7 @@ func (m *awsEc2query_serializeOpDescribeSnapshotAttribute) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSnapshotAttributeInput) + input, ok := in.Parameters.(*DescribeSecurityGroupReferencesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24702,10 +25052,10 @@ func (m *awsEc2query_serializeOpDescribeSnapshotAttribute) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSnapshotAttribute") + body.Key("Action").String("DescribeSecurityGroupReferences") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSnapshotAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupReferencesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24728,14 +25078,14 @@ func (m *awsEc2query_serializeOpDescribeSnapshotAttribute) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSnapshots struct { +type awsEc2query_serializeOpDescribeSecurityGroupRules struct { } -func (*awsEc2query_serializeOpDescribeSnapshots) ID() string { +func (*awsEc2query_serializeOpDescribeSecurityGroupRules) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSnapshots) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSecurityGroupRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24747,7 +25097,7 @@ func (m *awsEc2query_serializeOpDescribeSnapshots) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSnapshotsInput) + input, ok := in.Parameters.(*DescribeSecurityGroupRulesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24772,10 +25122,10 @@ func (m *awsEc2query_serializeOpDescribeSnapshots) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSnapshots") + body.Key("Action").String("DescribeSecurityGroupRules") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSnapshotsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupRulesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24798,14 +25148,14 @@ func (m *awsEc2query_serializeOpDescribeSnapshots) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSnapshotTierStatus struct { +type awsEc2query_serializeOpDescribeSecurityGroups struct { } -func (*awsEc2query_serializeOpDescribeSnapshotTierStatus) ID() string { +func (*awsEc2query_serializeOpDescribeSecurityGroups) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSnapshotTierStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSecurityGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24817,7 +25167,7 @@ func (m *awsEc2query_serializeOpDescribeSnapshotTierStatus) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSnapshotTierStatusInput) + input, ok := in.Parameters.(*DescribeSecurityGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24842,10 +25192,10 @@ func (m *awsEc2query_serializeOpDescribeSnapshotTierStatus) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSnapshotTierStatus") + body.Key("Action").String("DescribeSecurityGroups") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSnapshotTierStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24868,14 +25218,14 @@ func (m *awsEc2query_serializeOpDescribeSnapshotTierStatus) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotDatafeedSubscription struct { +type awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations struct { } -func (*awsEc2query_serializeOpDescribeSpotDatafeedSubscription) ID() string { +func (*awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSpotDatafeedSubscription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSecurityGroupVpcAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24887,7 +25237,7 @@ func (m *awsEc2query_serializeOpDescribeSpotDatafeedSubscription) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*DescribeSpotDatafeedSubscriptionInput) + input, ok := in.Parameters.(*DescribeSecurityGroupVpcAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24912,10 +25262,10 @@ func (m *awsEc2query_serializeOpDescribeSpotDatafeedSubscription) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotDatafeedSubscription") + body.Key("Action").String("DescribeSecurityGroupVpcAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotDatafeedSubscriptionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSecurityGroupVpcAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -24938,14 +25288,14 @@ func (m *awsEc2query_serializeOpDescribeSpotDatafeedSubscription) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotFleetInstances struct { +type awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces struct { } -func (*awsEc2query_serializeOpDescribeSpotFleetInstances) ID() string { +func (*awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSpotFleetInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeServiceLinkVirtualInterfaces) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -24957,7 +25307,7 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetInstances) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSpotFleetInstancesInput) + input, ok := in.Parameters.(*DescribeServiceLinkVirtualInterfacesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -24982,10 +25332,10 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetInstances) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotFleetInstances") + body.Key("Action").String("DescribeServiceLinkVirtualInterfaces") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotFleetInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeServiceLinkVirtualInterfacesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25008,14 +25358,14 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetInstances) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotFleetRequestHistory struct { +type awsEc2query_serializeOpDescribeSnapshotAttribute struct { } -func (*awsEc2query_serializeOpDescribeSpotFleetRequestHistory) ID() string { +func (*awsEc2query_serializeOpDescribeSnapshotAttribute) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpDescribeSpotFleetRequestHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25027,7 +25377,7 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequestHistory) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSpotFleetRequestHistoryInput) + input, ok := in.Parameters.(*DescribeSnapshotAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25052,10 +25402,10 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequestHistory) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotFleetRequestHistory") + body.Key("Action").String("DescribeSnapshotAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotFleetRequestHistoryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSnapshotAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25078,14 +25428,14 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequestHistory) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotFleetRequests struct { +type awsEc2query_serializeOpDescribeSnapshots struct { } -func (*awsEc2query_serializeOpDescribeSpotFleetRequests) ID() string { +func (*awsEc2query_serializeOpDescribeSnapshots) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSpotFleetRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSnapshots) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25097,7 +25447,7 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequests) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSpotFleetRequestsInput) + input, ok := in.Parameters.(*DescribeSnapshotsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25122,10 +25472,10 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequests) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotFleetRequests") + body.Key("Action").String("DescribeSnapshots") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotFleetRequestsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSnapshotsInput(input, bodyEncoder.Value); err != nil { return 
out, metadata, &smithy.SerializationError{Err: err} } @@ -25148,14 +25498,14 @@ func (m *awsEc2query_serializeOpDescribeSpotFleetRequests) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotInstanceRequests struct { +type awsEc2query_serializeOpDescribeSnapshotTierStatus struct { } -func (*awsEc2query_serializeOpDescribeSpotInstanceRequests) ID() string { +func (*awsEc2query_serializeOpDescribeSnapshotTierStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSpotInstanceRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSnapshotTierStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25167,7 +25517,7 @@ func (m *awsEc2query_serializeOpDescribeSpotInstanceRequests) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSpotInstanceRequestsInput) + input, ok := in.Parameters.(*DescribeSnapshotTierStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25192,10 +25542,10 @@ func (m *awsEc2query_serializeOpDescribeSpotInstanceRequests) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotInstanceRequests") + body.Key("Action").String("DescribeSnapshotTierStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotInstanceRequestsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSnapshotTierStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25218,14 +25568,14 @@ func (m *awsEc2query_serializeOpDescribeSpotInstanceRequests) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSpotPriceHistory struct { +type awsEc2query_serializeOpDescribeSpotDatafeedSubscription struct { } -func (*awsEc2query_serializeOpDescribeSpotPriceHistory) ID() string { +func (*awsEc2query_serializeOpDescribeSpotDatafeedSubscription) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSpotPriceHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotDatafeedSubscription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25237,7 +25587,7 @@ func (m *awsEc2query_serializeOpDescribeSpotPriceHistory) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSpotPriceHistoryInput) + input, ok := in.Parameters.(*DescribeSpotDatafeedSubscriptionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25262,10 
+25612,10 @@ func (m *awsEc2query_serializeOpDescribeSpotPriceHistory) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSpotPriceHistory") + body.Key("Action").String("DescribeSpotDatafeedSubscription") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSpotPriceHistoryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotDatafeedSubscriptionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25288,14 +25638,14 @@ func (m *awsEc2query_serializeOpDescribeSpotPriceHistory) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeStaleSecurityGroups struct { +type awsEc2query_serializeOpDescribeSpotFleetInstances struct { } -func (*awsEc2query_serializeOpDescribeStaleSecurityGroups) ID() string { +func (*awsEc2query_serializeOpDescribeSpotFleetInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeStaleSecurityGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotFleetInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25307,7 +25657,7 @@ func (m *awsEc2query_serializeOpDescribeStaleSecurityGroups) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeStaleSecurityGroupsInput) + input, ok := in.Parameters.(*DescribeSpotFleetInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25332,10 +25682,10 @@ func (m *awsEc2query_serializeOpDescribeStaleSecurityGroups) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeStaleSecurityGroups") + body.Key("Action").String("DescribeSpotFleetInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeStaleSecurityGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotFleetInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25358,14 +25708,14 @@ func (m *awsEc2query_serializeOpDescribeStaleSecurityGroups) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeStoreImageTasks struct { +type awsEc2query_serializeOpDescribeSpotFleetRequestHistory struct { } -func (*awsEc2query_serializeOpDescribeStoreImageTasks) ID() string { +func (*awsEc2query_serializeOpDescribeSpotFleetRequestHistory) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeStoreImageTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotFleetRequestHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25377,7 +25727,7 @@ func (m *awsEc2query_serializeOpDescribeStoreImageTasks) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeStoreImageTasksInput) + input, ok := in.Parameters.(*DescribeSpotFleetRequestHistoryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25402,10 +25752,10 @@ func (m *awsEc2query_serializeOpDescribeStoreImageTasks) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeStoreImageTasks") + body.Key("Action").String("DescribeSpotFleetRequestHistory") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeStoreImageTasksInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotFleetRequestHistoryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25428,14 +25778,14 @@ func (m *awsEc2query_serializeOpDescribeStoreImageTasks) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeSubnets struct { +type awsEc2query_serializeOpDescribeSpotFleetRequests struct { } -func (*awsEc2query_serializeOpDescribeSubnets) ID() string { +func (*awsEc2query_serializeOpDescribeSpotFleetRequests) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeSubnets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotFleetRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25447,7 +25797,7 @@ func (m *awsEc2query_serializeOpDescribeSubnets) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeSubnetsInput) + input, ok := in.Parameters.(*DescribeSpotFleetRequestsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25472,10 +25822,10 @@ func (m *awsEc2query_serializeOpDescribeSubnets) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeSubnets") + body.Key("Action").String("DescribeSpotFleetRequests") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeSubnetsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotFleetRequestsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25498,14 +25848,14 @@ func (m *awsEc2query_serializeOpDescribeSubnets) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTags struct { +type awsEc2query_serializeOpDescribeSpotInstanceRequests struct { } -func (*awsEc2query_serializeOpDescribeTags) ID() string { 
+func (*awsEc2query_serializeOpDescribeSpotInstanceRequests) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotInstanceRequests) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25517,7 +25867,7 @@ func (m *awsEc2query_serializeOpDescribeTags) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTagsInput) + input, ok := in.Parameters.(*DescribeSpotInstanceRequestsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25542,10 +25892,10 @@ func (m *awsEc2query_serializeOpDescribeTags) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTags") + body.Key("Action").String("DescribeSpotInstanceRequests") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTagsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotInstanceRequestsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25568,14 +25918,14 @@ func (m *awsEc2query_serializeOpDescribeTags) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTrafficMirrorFilterRules struct { +type awsEc2query_serializeOpDescribeSpotPriceHistory struct { } -func (*awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) ID() string { +func (*awsEc2query_serializeOpDescribeSpotPriceHistory) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSpotPriceHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25587,7 +25937,7 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTrafficMirrorFilterRulesInput) + input, ok := in.Parameters.(*DescribeSpotPriceHistoryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25612,10 +25962,10 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTrafficMirrorFilterRules") + body.Key("Action").String("DescribeSpotPriceHistory") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorFilterRulesInput(input, 
bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSpotPriceHistoryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25638,14 +25988,14 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTrafficMirrorFilters struct { +type awsEc2query_serializeOpDescribeStaleSecurityGroups struct { } -func (*awsEc2query_serializeOpDescribeTrafficMirrorFilters) ID() string { +func (*awsEc2query_serializeOpDescribeStaleSecurityGroups) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeStaleSecurityGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25657,7 +26007,7 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilters) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTrafficMirrorFiltersInput) + input, ok := in.Parameters.(*DescribeStaleSecurityGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25682,10 +26032,10 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilters) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTrafficMirrorFilters") + body.Key("Action").String("DescribeStaleSecurityGroups") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorFiltersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeStaleSecurityGroupsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25708,14 +26058,14 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilters) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTrafficMirrorSessions struct { +type awsEc2query_serializeOpDescribeStoreImageTasks struct { } -func (*awsEc2query_serializeOpDescribeTrafficMirrorSessions) ID() string { +func (*awsEc2query_serializeOpDescribeStoreImageTasks) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTrafficMirrorSessions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeStoreImageTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25727,7 +26077,7 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorSessions) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTrafficMirrorSessionsInput) + input, ok := in.Parameters.(*DescribeStoreImageTasksInput) _ = input if 
!ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25752,10 +26102,10 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorSessions) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTrafficMirrorSessions") + body.Key("Action").String("DescribeStoreImageTasks") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorSessionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeStoreImageTasksInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25778,14 +26128,14 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorSessions) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTrafficMirrorTargets struct { +type awsEc2query_serializeOpDescribeSubnets struct { } -func (*awsEc2query_serializeOpDescribeTrafficMirrorTargets) ID() string { +func (*awsEc2query_serializeOpDescribeSubnets) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTrafficMirrorTargets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeSubnets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25797,7 +26147,7 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorTargets) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTrafficMirrorTargetsInput) + input, ok := in.Parameters.(*DescribeSubnetsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25822,10 +26172,10 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorTargets) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTrafficMirrorTargets") + body.Key("Action").String("DescribeSubnets") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorTargetsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeSubnetsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25848,14 +26198,14 @@ func (m *awsEc2query_serializeOpDescribeTrafficMirrorTargets) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayAttachments struct { +type awsEc2query_serializeOpDescribeTags struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayAttachments) ID() string { +func (*awsEc2query_serializeOpDescribeTags) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( 
out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25867,7 +26217,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayAttachments) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayAttachmentsInput) + input, ok := in.Parameters.(*DescribeTagsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25892,10 +26242,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayAttachments) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayAttachments") + body.Key("Action").String("DescribeTags") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayAttachmentsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTagsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25918,14 +26268,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayAttachments) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayConnectPeers struct { +type awsEc2query_serializeOpDescribeTrafficMirrorFilterRules struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) ID() string { +func (*awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilterRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -25937,7 +26287,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayConnectPeersInput) + input, ok := in.Parameters.(*DescribeTrafficMirrorFilterRulesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -25962,10 +26312,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayConnectPeers") + body.Key("Action").String("DescribeTrafficMirrorFilterRules") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayConnectPeersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorFilterRulesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -25988,14 +26338,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) HandleSerial return next.HandleSerialize(ctx, in) } -type 
awsEc2query_serializeOpDescribeTransitGatewayConnects struct { +type awsEc2query_serializeOpDescribeTrafficMirrorFilters struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayConnects) ID() string { +func (*awsEc2query_serializeOpDescribeTrafficMirrorFilters) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayConnects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTrafficMirrorFilters) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26007,7 +26357,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnects) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayConnectsInput) + input, ok := in.Parameters.(*DescribeTrafficMirrorFiltersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26032,10 +26382,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnects) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayConnects") + body.Key("Action").String("DescribeTrafficMirrorFilters") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayConnectsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorFiltersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26058,14 +26408,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayConnects) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains struct { +type awsEc2query_serializeOpDescribeTrafficMirrorSessions struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) ID() string { +func (*awsEc2query_serializeOpDescribeTrafficMirrorSessions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTrafficMirrorSessions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26077,7 +26427,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) HandleSe return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayMulticastDomainsInput) + input, ok := in.Parameters.(*DescribeTrafficMirrorSessionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26102,10 +26452,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) HandleSe bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayMulticastDomains") + body.Key("Action").String("DescribeTrafficMirrorSessions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayMulticastDomainsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorSessionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26128,14 +26478,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) HandleSe return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments struct { +type awsEc2query_serializeOpDescribeTrafficMirrorTargets struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) ID() string { +func (*awsEc2query_serializeOpDescribeTrafficMirrorTargets) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTrafficMirrorTargets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26147,7 +26497,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) Handle return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayPeeringAttachmentsInput) + input, ok := in.Parameters.(*DescribeTrafficMirrorTargetsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26172,10 +26522,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) Handle bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayPeeringAttachments") + body.Key("Action").String("DescribeTrafficMirrorTargets") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayPeeringAttachmentsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTrafficMirrorTargetsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26198,14 +26548,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) Handle return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayPolicyTables struct { +type awsEc2query_serializeOpDescribeTransitGatewayAttachments struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayAttachments) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26217,7 +26567,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayPolicyTablesInput) + input, ok := in.Parameters.(*DescribeTransitGatewayAttachmentsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26242,10 +26592,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayPolicyTables") + body.Key("Action").String("DescribeTransitGatewayAttachments") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayPolicyTablesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayAttachmentsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26268,14 +26618,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements struct { +type awsEc2query_serializeOpDescribeTransitGatewayConnectPeers struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayConnectPeers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26287,7 +26637,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) H return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayRouteTableAnnouncementsInput) + input, ok := in.Parameters.(*DescribeTransitGatewayConnectPeersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26312,10 +26662,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) H bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayRouteTableAnnouncements") + body.Key("Action").String("DescribeTransitGatewayConnectPeers") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayRouteTableAnnouncementsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayConnectPeersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26338,14 +26688,14 @@ func (m 
*awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) H return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayRouteTables struct { +type awsEc2query_serializeOpDescribeTransitGatewayConnects struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayRouteTables) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayConnects) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayConnects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26357,7 +26707,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTables) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayRouteTablesInput) + input, ok := in.Parameters.(*DescribeTransitGatewayConnectsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26382,10 +26732,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTables) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayRouteTables") + body.Key("Action").String("DescribeTransitGatewayConnects") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayRouteTablesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayConnectsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26408,14 +26758,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTables) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGateways struct { +type awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains struct { } -func (*awsEc2query_serializeOpDescribeTransitGateways) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayMulticastDomains) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26427,7 +26777,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGateways) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewaysInput) + input, ok := in.Parameters.(*DescribeTransitGatewayMulticastDomainsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26452,10 +26802,10 @@ func (m 
*awsEc2query_serializeOpDescribeTransitGateways) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGateways") + body.Key("Action").String("DescribeTransitGatewayMulticastDomains") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayMulticastDomainsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26478,14 +26828,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGateways) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments struct { +type awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments struct { } -func (*awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayPeeringAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26497,7 +26847,7 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTransitGatewayVpcAttachmentsInput) + input, ok := in.Parameters.(*DescribeTransitGatewayPeeringAttachmentsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26522,10 +26872,10 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTransitGatewayVpcAttachments") + body.Key("Action").String("DescribeTransitGatewayPeeringAttachments") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayVpcAttachmentsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayPeeringAttachmentsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26548,14 +26898,14 @@ func (m *awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeTrunkInterfaceAssociations struct { +type awsEc2query_serializeOpDescribeTransitGatewayPolicyTables struct { } -func (*awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpDescribeTransitGatewayPolicyTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26567,7 +26917,7 @@ func (m *awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeTrunkInterfaceAssociationsInput) + input, ok := in.Parameters.(*DescribeTransitGatewayPolicyTablesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26592,10 +26942,10 @@ func (m *awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeTrunkInterfaceAssociations") + body.Key("Action").String("DescribeTransitGatewayPolicyTables") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeTrunkInterfaceAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayPolicyTablesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26618,14 +26968,14 @@ func (m *awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVerifiedAccessEndpoints struct { +type awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements struct { } -func (*awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTableAnnouncements) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26637,7 +26987,7 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVerifiedAccessEndpointsInput) + input, ok := in.Parameters.(*DescribeTransitGatewayRouteTableAnnouncementsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26662,10 +27012,10 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVerifiedAccessEndpoints") + body.Key("Action").String("DescribeTransitGatewayRouteTableAnnouncements") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessEndpointsInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDescribeTransitGatewayRouteTableAnnouncementsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26688,14 +27038,14 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVerifiedAccessGroups struct { +type awsEc2query_serializeOpDescribeTransitGatewayRouteTables struct { } -func (*awsEc2query_serializeOpDescribeVerifiedAccessGroups) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayRouteTables) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVerifiedAccessGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayRouteTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26707,7 +27057,7 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessGroups) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVerifiedAccessGroupsInput) + input, ok := in.Parameters.(*DescribeTransitGatewayRouteTablesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26732,10 +27082,10 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessGroups) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVerifiedAccessGroups") + body.Key("Action").String("DescribeTransitGatewayRouteTables") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayRouteTablesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26758,14 +27108,14 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessGroups) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations struct { +type awsEc2query_serializeOpDescribeTransitGateways struct { } -func (*awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26777,7 +27127,7 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurati return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*DescribeVerifiedAccessInstanceLoggingConfigurationsInput) + input, ok := in.Parameters.(*DescribeTransitGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26802,10 +27152,10 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurati bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVerifiedAccessInstanceLoggingConfigurations") + body.Key("Action").String("DescribeTransitGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessInstanceLoggingConfigurationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26828,14 +27178,14 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurati return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVerifiedAccessInstances struct { +type awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments struct { } -func (*awsEc2query_serializeOpDescribeVerifiedAccessInstances) ID() string { +func (*awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTransitGatewayVpcAttachments) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26847,7 +27197,7 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstances) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVerifiedAccessInstancesInput) + input, ok := in.Parameters.(*DescribeTransitGatewayVpcAttachmentsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26872,10 +27222,10 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstances) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVerifiedAccessInstances") + body.Key("Action").String("DescribeTransitGatewayVpcAttachments") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTransitGatewayVpcAttachmentsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26898,14 +27248,14 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstances) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders struct { +type awsEc2query_serializeOpDescribeTrunkInterfaceAssociations struct { } -func (*awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) ID() string { +func 
(*awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeTrunkInterfaceAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26917,7 +27267,7 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVerifiedAccessTrustProvidersInput) + input, ok := in.Parameters.(*DescribeTrunkInterfaceAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -26942,10 +27292,10 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVerifiedAccessTrustProviders") + body.Key("Action").String("DescribeTrunkInterfaceAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessTrustProvidersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeTrunkInterfaceAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -26968,14 +27318,14 @@ func (m *awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVolumeAttribute struct { +type awsEc2query_serializeOpDescribeVerifiedAccessEndpoints struct { } -func (*awsEc2query_serializeOpDescribeVolumeAttribute) ID() string { +func (*awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVolumeAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVerifiedAccessEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -26987,7 +27337,7 @@ func (m *awsEc2query_serializeOpDescribeVolumeAttribute) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVolumeAttributeInput) + input, ok := in.Parameters.(*DescribeVerifiedAccessEndpointsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27012,10 +27362,10 @@ func (m *awsEc2query_serializeOpDescribeVolumeAttribute) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVolumeAttribute") + body.Key("Action").String("DescribeVerifiedAccessEndpoints") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVolumeAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessEndpointsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27038,14 +27388,14 @@ func (m *awsEc2query_serializeOpDescribeVolumeAttribute) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVolumes struct { +type awsEc2query_serializeOpDescribeVerifiedAccessGroups struct { } -func (*awsEc2query_serializeOpDescribeVolumes) ID() string { +func (*awsEc2query_serializeOpDescribeVerifiedAccessGroups) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVolumes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVerifiedAccessGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27057,7 +27407,7 @@ func (m *awsEc2query_serializeOpDescribeVolumes) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVolumesInput) + input, ok := in.Parameters.(*DescribeVerifiedAccessGroupsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27082,10 +27432,10 @@ func (m *awsEc2query_serializeOpDescribeVolumes) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVolumes") + body.Key("Action").String("DescribeVerifiedAccessGroups") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVolumesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessGroupsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27108,14 +27458,14 @@ func (m *awsEc2query_serializeOpDescribeVolumes) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVolumesModifications struct { +type awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations struct { } -func (*awsEc2query_serializeOpDescribeVolumesModifications) ID() string { +func (*awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVolumesModifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstanceLoggingConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27127,7 +27477,7 @@ func (m *awsEc2query_serializeOpDescribeVolumesModifications) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - 
input, ok := in.Parameters.(*DescribeVolumesModificationsInput) + input, ok := in.Parameters.(*DescribeVerifiedAccessInstanceLoggingConfigurationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27152,10 +27502,10 @@ func (m *awsEc2query_serializeOpDescribeVolumesModifications) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVolumesModifications") + body.Key("Action").String("DescribeVerifiedAccessInstanceLoggingConfigurations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVolumesModificationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessInstanceLoggingConfigurationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27178,14 +27528,14 @@ func (m *awsEc2query_serializeOpDescribeVolumesModifications) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVolumeStatus struct { +type awsEc2query_serializeOpDescribeVerifiedAccessInstances struct { } -func (*awsEc2query_serializeOpDescribeVolumeStatus) ID() string { +func (*awsEc2query_serializeOpDescribeVerifiedAccessInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVolumeStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVerifiedAccessInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27197,7 +27547,7 @@ func (m *awsEc2query_serializeOpDescribeVolumeStatus) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVolumeStatusInput) + input, ok := in.Parameters.(*DescribeVerifiedAccessInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27222,10 +27572,10 @@ func (m *awsEc2query_serializeOpDescribeVolumeStatus) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVolumeStatus") + body.Key("Action").String("DescribeVerifiedAccessInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVolumeStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27248,14 +27598,14 @@ func (m *awsEc2query_serializeOpDescribeVolumeStatus) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcAttribute struct { +type awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders struct { } -func (*awsEc2query_serializeOpDescribeVpcAttribute) ID() string { +func (*awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpDescribeVpcAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVerifiedAccessTrustProviders) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27267,7 +27617,7 @@ func (m *awsEc2query_serializeOpDescribeVpcAttribute) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcAttributeInput) + input, ok := in.Parameters.(*DescribeVerifiedAccessTrustProvidersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27292,10 +27642,10 @@ func (m *awsEc2query_serializeOpDescribeVpcAttribute) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcAttribute") + body.Key("Action").String("DescribeVerifiedAccessTrustProviders") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVerifiedAccessTrustProvidersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27318,14 +27668,14 @@ func (m *awsEc2query_serializeOpDescribeVpcAttribute) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions struct { +type awsEc2query_serializeOpDescribeVolumeAttribute struct { } -func (*awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) ID() string { +func (*awsEc2query_serializeOpDescribeVolumeAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVolumeAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27337,7 +27687,7 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) HandleSe return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcBlockPublicAccessExclusionsInput) + input, ok := in.Parameters.(*DescribeVolumeAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27362,10 +27712,10 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) HandleSe bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcBlockPublicAccessExclusions") + body.Key("Action").String("DescribeVolumeAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcBlockPublicAccessExclusionsInput(input, bodyEncoder.Value); err != nil { + if 
err := awsEc2query_serializeOpDocumentDescribeVolumeAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27388,14 +27738,14 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) HandleSe return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions struct { +type awsEc2query_serializeOpDescribeVolumes struct { } -func (*awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) ID() string { +func (*awsEc2query_serializeOpDescribeVolumes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVolumes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27407,7 +27757,7 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcBlockPublicAccessOptionsInput) + input, ok := in.Parameters.(*DescribeVolumesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27432,10 +27782,10 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcBlockPublicAccessOptions") + body.Key("Action").String("DescribeVolumes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcBlockPublicAccessOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVolumesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27458,14 +27808,14 @@ func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcClassicLink struct { +type awsEc2query_serializeOpDescribeVolumesModifications struct { } -func (*awsEc2query_serializeOpDescribeVpcClassicLink) ID() string { +func (*awsEc2query_serializeOpDescribeVolumesModifications) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVolumesModifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27477,7 +27827,7 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLink) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcClassicLinkInput) + input, ok := in.Parameters.(*DescribeVolumesModificationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27502,10 +27852,10 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLink) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcClassicLink") + body.Key("Action").String("DescribeVolumesModifications") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVolumesModificationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27528,14 +27878,14 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLink) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport struct { +type awsEc2query_serializeOpDescribeVolumeStatus struct { } -func (*awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_serializeOpDescribeVolumeStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVolumeStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27547,7 +27897,7 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcClassicLinkDnsSupportInput) + input, ok := in.Parameters.(*DescribeVolumeStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27572,10 +27922,10 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcClassicLinkDnsSupport") + body.Key("Action").String("DescribeVolumeStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVolumeStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27598,14 +27948,14 @@ func (m *awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointAssociations struct { +type awsEc2query_serializeOpDescribeVpcAttribute struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointAssociations) ID() string { +func (*awsEc2query_serializeOpDescribeVpcAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27617,7 +27967,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointAssociations) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointAssociationsInput) + input, ok := in.Parameters.(*DescribeVpcAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27642,10 +27992,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointAssociations) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointAssociations") + body.Key("Action").String("DescribeVpcAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27668,14 +28018,14 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointAssociations) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications struct { +type awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) ID() string { +func (*awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessExclusions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27687,7 +28037,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointConnectionNotificationsInput) + input, ok := in.Parameters.(*DescribeVpcBlockPublicAccessExclusionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27712,10 +28062,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) Hand bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointConnectionNotifications") + body.Key("Action").String("DescribeVpcBlockPublicAccessExclusions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointConnectionNotificationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcBlockPublicAccessExclusionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27738,14 +28088,14 @@ 
func (m *awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) Hand return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointConnections struct { +type awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointConnections) ID() string { +func (*awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcBlockPublicAccessOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27757,7 +28107,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointConnections) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointConnectionsInput) + input, ok := in.Parameters.(*DescribeVpcBlockPublicAccessOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27782,10 +28132,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointConnections) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointConnections") + body.Key("Action").String("DescribeVpcBlockPublicAccessOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointConnectionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcBlockPublicAccessOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27808,14 +28158,14 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointConnections) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpoints struct { +type awsEc2query_serializeOpDescribeVpcClassicLink struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpoints) ID() string { +func (*awsEc2query_serializeOpDescribeVpcClassicLink) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27827,7 +28177,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpoints) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointsInput) + input, ok := in.Parameters.(*DescribeVpcClassicLinkInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27852,10 +28202,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpoints) 
HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpoints") + body.Key("Action").String("DescribeVpcClassicLink") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27878,14 +28228,14 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpoints) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations struct { +type awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) ID() string { +func (*awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27897,7 +28247,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) Handle return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointServiceConfigurationsInput) + input, ok := in.Parameters.(*DescribeVpcClassicLinkDnsSupportInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27922,10 +28272,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) Handle bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointServiceConfigurations") + body.Key("Action").String("DescribeVpcClassicLinkDnsSupport") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServiceConfigurationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -27948,14 +28298,14 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) Handle return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointServicePermissions struct { +type awsEc2query_serializeOpDescribeVpcEndpointAssociations struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -27967,7 +28317,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointServicePermissionsInput) + input, ok := in.Parameters.(*DescribeVpcEndpointAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -27992,10 +28342,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointServicePermissions") + body.Key("Action").String("DescribeVpcEndpointAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServicePermissionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28018,14 +28368,14 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcEndpointServices struct { +type awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications struct { } -func (*awsEc2query_serializeOpDescribeVpcEndpointServices) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcEndpointServices) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointConnectionNotifications) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28037,7 +28387,7 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServices) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcEndpointServicesInput) + input, ok := in.Parameters.(*DescribeVpcEndpointConnectionNotificationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28062,10 +28412,10 @@ func (m *awsEc2query_serializeOpDescribeVpcEndpointServices) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcEndpointServices") + body.Key("Action").String("DescribeVpcEndpointConnectionNotifications") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServicesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointConnectionNotificationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28088,14 +28438,14 @@ func (m 
*awsEc2query_serializeOpDescribeVpcEndpointServices) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcPeeringConnections struct { +type awsEc2query_serializeOpDescribeVpcEndpointConnections struct { } -func (*awsEc2query_serializeOpDescribeVpcPeeringConnections) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointConnections) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcPeeringConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28107,7 +28457,7 @@ func (m *awsEc2query_serializeOpDescribeVpcPeeringConnections) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcPeeringConnectionsInput) + input, ok := in.Parameters.(*DescribeVpcEndpointConnectionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28132,10 +28482,10 @@ func (m *awsEc2query_serializeOpDescribeVpcPeeringConnections) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcPeeringConnections") + body.Key("Action").String("DescribeVpcEndpointConnections") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcPeeringConnectionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointConnectionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28158,14 +28508,14 @@ func (m *awsEc2query_serializeOpDescribeVpcPeeringConnections) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpcs struct { +type awsEc2query_serializeOpDescribeVpcEndpoints struct { } -func (*awsEc2query_serializeOpDescribeVpcs) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpoints) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpcs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28177,7 +28527,7 @@ func (m *awsEc2query_serializeOpDescribeVpcs) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpcsInput) + input, ok := in.Parameters.(*DescribeVpcEndpointsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28202,10 +28552,10 @@ func (m *awsEc2query_serializeOpDescribeVpcs) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpcs") + body.Key("Action").String("DescribeVpcEndpoints") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpcsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28228,14 +28578,14 @@ func (m *awsEc2query_serializeOpDescribeVpcs) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpnConnections struct { +type awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations struct { } -func (*awsEc2query_serializeOpDescribeVpnConnections) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpnConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointServiceConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28247,7 +28597,7 @@ func (m *awsEc2query_serializeOpDescribeVpnConnections) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpnConnectionsInput) + input, ok := in.Parameters.(*DescribeVpcEndpointServiceConfigurationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28272,10 +28622,10 @@ func (m *awsEc2query_serializeOpDescribeVpnConnections) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpnConnections") + body.Key("Action").String("DescribeVpcEndpointServiceConfigurations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpnConnectionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServiceConfigurationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28298,14 +28648,14 @@ func (m *awsEc2query_serializeOpDescribeVpnConnections) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDescribeVpnGateways struct { +type awsEc2query_serializeOpDescribeVpcEndpointServicePermissions struct { } -func (*awsEc2query_serializeOpDescribeVpnGateways) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDescribeVpnGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointServicePermissions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28317,7 +28667,7 @@ func (m 
*awsEc2query_serializeOpDescribeVpnGateways) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DescribeVpnGatewaysInput) + input, ok := in.Parameters.(*DescribeVpcEndpointServicePermissionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28342,10 +28692,10 @@ func (m *awsEc2query_serializeOpDescribeVpnGateways) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DescribeVpnGateways") + body.Key("Action").String("DescribeVpcEndpointServicePermissions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDescribeVpnGatewaysInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServicePermissionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28368,14 +28718,14 @@ func (m *awsEc2query_serializeOpDescribeVpnGateways) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachClassicLinkVpc struct { +type awsEc2query_serializeOpDescribeVpcEndpointServices struct { } -func (*awsEc2query_serializeOpDetachClassicLinkVpc) ID() string { +func (*awsEc2query_serializeOpDescribeVpcEndpointServices) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachClassicLinkVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcEndpointServices) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28387,7 +28737,7 @@ func (m *awsEc2query_serializeOpDetachClassicLinkVpc) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachClassicLinkVpcInput) + input, ok := in.Parameters.(*DescribeVpcEndpointServicesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28412,10 +28762,10 @@ func (m *awsEc2query_serializeOpDetachClassicLinkVpc) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachClassicLinkVpc") + body.Key("Action").String("DescribeVpcEndpointServices") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachClassicLinkVpcInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcEndpointServicesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28438,14 +28788,14 @@ func (m *awsEc2query_serializeOpDetachClassicLinkVpc) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachInternetGateway struct { +type awsEc2query_serializeOpDescribeVpcPeeringConnections struct { } -func (*awsEc2query_serializeOpDetachInternetGateway) ID() string { +func (*awsEc2query_serializeOpDescribeVpcPeeringConnections) 
ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachInternetGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcPeeringConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28457,7 +28807,7 @@ func (m *awsEc2query_serializeOpDetachInternetGateway) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachInternetGatewayInput) + input, ok := in.Parameters.(*DescribeVpcPeeringConnectionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28482,10 +28832,10 @@ func (m *awsEc2query_serializeOpDetachInternetGateway) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachInternetGateway") + body.Key("Action").String("DescribeVpcPeeringConnections") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachInternetGatewayInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcPeeringConnectionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28508,14 +28858,14 @@ func (m *awsEc2query_serializeOpDetachInternetGateway) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachNetworkInterface struct { +type awsEc2query_serializeOpDescribeVpcs struct { } -func (*awsEc2query_serializeOpDetachNetworkInterface) ID() string { +func (*awsEc2query_serializeOpDescribeVpcs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachNetworkInterface) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpcs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28527,7 +28877,7 @@ func (m *awsEc2query_serializeOpDetachNetworkInterface) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachNetworkInterfaceInput) + input, ok := in.Parameters.(*DescribeVpcsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28552,10 +28902,10 @@ func (m *awsEc2query_serializeOpDetachNetworkInterface) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachNetworkInterface") + body.Key("Action").String("DescribeVpcs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachNetworkInterfaceInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpcsInput(input, bodyEncoder.Value); err != nil { return out, metadata, 
&smithy.SerializationError{Err: err} } @@ -28578,14 +28928,14 @@ func (m *awsEc2query_serializeOpDetachNetworkInterface) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachVerifiedAccessTrustProvider struct { +type awsEc2query_serializeOpDescribeVpnConnections struct { } -func (*awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_serializeOpDescribeVpnConnections) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpnConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28597,7 +28947,7 @@ func (m *awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachVerifiedAccessTrustProviderInput) + input, ok := in.Parameters.(*DescribeVpnConnectionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28622,10 +28972,10 @@ func (m *awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachVerifiedAccessTrustProvider") + body.Key("Action").String("DescribeVpnConnections") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachVerifiedAccessTrustProviderInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpnConnectionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28648,14 +28998,14 @@ func (m *awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachVolume struct { +type awsEc2query_serializeOpDescribeVpnGateways struct { } -func (*awsEc2query_serializeOpDetachVolume) ID() string { +func (*awsEc2query_serializeOpDescribeVpnGateways) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDescribeVpnGateways) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28667,7 +29017,7 @@ func (m *awsEc2query_serializeOpDetachVolume) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachVolumeInput) + input, ok := in.Parameters.(*DescribeVpnGatewaysInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28692,10 +29042,10 @@ func (m *awsEc2query_serializeOpDetachVolume) HandleSerialize(ctx context.Contex bodyWriter := 
bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachVolume") + body.Key("Action").String("DescribeVpnGateways") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachVolumeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDescribeVpnGatewaysInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28718,14 +29068,14 @@ func (m *awsEc2query_serializeOpDetachVolume) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDetachVpnGateway struct { +type awsEc2query_serializeOpDetachClassicLinkVpc struct { } -func (*awsEc2query_serializeOpDetachVpnGateway) ID() string { +func (*awsEc2query_serializeOpDetachClassicLinkVpc) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDetachVpnGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachClassicLinkVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28737,7 +29087,7 @@ func (m *awsEc2query_serializeOpDetachVpnGateway) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DetachVpnGatewayInput) + input, ok := in.Parameters.(*DetachClassicLinkVpcInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28762,10 +29112,10 @@ func (m *awsEc2query_serializeOpDetachVpnGateway) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DetachVpnGateway") + body.Key("Action").String("DetachClassicLinkVpc") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDetachVpnGatewayInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachClassicLinkVpcInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28788,14 +29138,14 @@ func (m *awsEc2query_serializeOpDetachVpnGateway) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableAddressTransfer struct { +type awsEc2query_serializeOpDetachInternetGateway struct { } -func (*awsEc2query_serializeOpDisableAddressTransfer) ID() string { +func (*awsEc2query_serializeOpDetachInternetGateway) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableAddressTransfer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachInternetGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28807,7 +29157,7 @@ func (m *awsEc2query_serializeOpDisableAddressTransfer) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} } - input, ok := in.Parameters.(*DisableAddressTransferInput) + input, ok := in.Parameters.(*DetachInternetGatewayInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28832,10 +29182,10 @@ func (m *awsEc2query_serializeOpDisableAddressTransfer) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableAddressTransfer") + body.Key("Action").String("DetachInternetGateway") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableAddressTransferInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachInternetGatewayInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28858,14 +29208,14 @@ func (m *awsEc2query_serializeOpDisableAddressTransfer) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableAllowedImagesSettings struct { +type awsEc2query_serializeOpDetachNetworkInterface struct { } -func (*awsEc2query_serializeOpDisableAllowedImagesSettings) ID() string { +func (*awsEc2query_serializeOpDetachNetworkInterface) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachNetworkInterface) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28877,7 +29227,7 @@ func (m *awsEc2query_serializeOpDisableAllowedImagesSettings) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableAllowedImagesSettingsInput) + input, ok := in.Parameters.(*DetachNetworkInterfaceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28902,10 +29252,10 @@ func (m *awsEc2query_serializeOpDisableAllowedImagesSettings) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableAllowedImagesSettings") + body.Key("Action").String("DetachNetworkInterface") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachNetworkInterfaceInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28928,14 +29278,14 @@ func (m *awsEc2query_serializeOpDisableAllowedImagesSettings) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription struct { +type awsEc2query_serializeOpDetachVerifiedAccessTrustProvider struct { } -func (*awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) ID() string { +func (*awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachVerifiedAccessTrustProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -28947,7 +29297,7 @@ func (m *awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableAwsNetworkPerformanceMetricSubscriptionInput) + input, ok := in.Parameters.(*DetachVerifiedAccessTrustProviderInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -28972,10 +29322,10 @@ func (m *awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableAwsNetworkPerformanceMetricSubscription") + body.Key("Action").String("DetachVerifiedAccessTrustProvider") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableAwsNetworkPerformanceMetricSubscriptionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachVerifiedAccessTrustProviderInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -28998,14 +29348,14 @@ func (m *awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableEbsEncryptionByDefault struct { +type awsEc2query_serializeOpDetachVolume struct { } -func (*awsEc2query_serializeOpDisableEbsEncryptionByDefault) ID() string { +func (*awsEc2query_serializeOpDetachVolume) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29017,7 +29367,7 @@ func (m *awsEc2query_serializeOpDisableEbsEncryptionByDefault) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableEbsEncryptionByDefaultInput) + input, ok := in.Parameters.(*DetachVolumeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29042,10 +29392,10 @@ func (m *awsEc2query_serializeOpDisableEbsEncryptionByDefault) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableEbsEncryptionByDefault") + body.Key("Action").String("DetachVolume") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachVolumeInput(input, bodyEncoder.Value); err != 
nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29068,14 +29418,14 @@ func (m *awsEc2query_serializeOpDisableEbsEncryptionByDefault) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableFastLaunch struct { +type awsEc2query_serializeOpDetachVpnGateway struct { } -func (*awsEc2query_serializeOpDisableFastLaunch) ID() string { +func (*awsEc2query_serializeOpDetachVpnGateway) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableFastLaunch) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDetachVpnGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29087,7 +29437,7 @@ func (m *awsEc2query_serializeOpDisableFastLaunch) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableFastLaunchInput) + input, ok := in.Parameters.(*DetachVpnGatewayInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29112,10 +29462,10 @@ func (m *awsEc2query_serializeOpDisableFastLaunch) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableFastLaunch") + body.Key("Action").String("DetachVpnGateway") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableFastLaunchInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDetachVpnGatewayInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29138,14 +29488,14 @@ func (m *awsEc2query_serializeOpDisableFastLaunch) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableFastSnapshotRestores struct { +type awsEc2query_serializeOpDisableAddressTransfer struct { } -func (*awsEc2query_serializeOpDisableFastSnapshotRestores) ID() string { +func (*awsEc2query_serializeOpDisableAddressTransfer) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableFastSnapshotRestores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableAddressTransfer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29157,7 +29507,7 @@ func (m *awsEc2query_serializeOpDisableFastSnapshotRestores) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableFastSnapshotRestoresInput) + input, ok := in.Parameters.(*DisableAddressTransferInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29182,10 +29532,10 @@ func (m *awsEc2query_serializeOpDisableFastSnapshotRestores) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableFastSnapshotRestores") + body.Key("Action").String("DisableAddressTransfer") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableFastSnapshotRestoresInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableAddressTransferInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29208,14 +29558,14 @@ func (m *awsEc2query_serializeOpDisableFastSnapshotRestores) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableImage struct { +type awsEc2query_serializeOpDisableAllowedImagesSettings struct { } -func (*awsEc2query_serializeOpDisableImage) ID() string { +func (*awsEc2query_serializeOpDisableAllowedImagesSettings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29227,7 +29577,7 @@ func (m *awsEc2query_serializeOpDisableImage) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableImageInput) + input, ok := in.Parameters.(*DisableAllowedImagesSettingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29252,10 +29602,10 @@ func (m *awsEc2query_serializeOpDisableImage) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableImage") + body.Key("Action").String("DisableAllowedImagesSettings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableImageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29278,14 +29628,14 @@ func (m *awsEc2query_serializeOpDisableImage) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableImageBlockPublicAccess struct { +type awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription struct { } -func (*awsEc2query_serializeOpDisableImageBlockPublicAccess) ID() string { +func (*awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableImageBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableAwsNetworkPerformanceMetricSubscription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29297,7 +29647,7 @@ func (m 
*awsEc2query_serializeOpDisableImageBlockPublicAccess) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableImageBlockPublicAccessInput) + input, ok := in.Parameters.(*DisableAwsNetworkPerformanceMetricSubscriptionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29322,10 +29672,10 @@ func (m *awsEc2query_serializeOpDisableImageBlockPublicAccess) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableImageBlockPublicAccess") + body.Key("Action").String("DisableAwsNetworkPerformanceMetricSubscription") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableImageBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableAwsNetworkPerformanceMetricSubscriptionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29348,14 +29698,14 @@ func (m *awsEc2query_serializeOpDisableImageBlockPublicAccess) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableImageDeprecation struct { +type awsEc2query_serializeOpDisableEbsEncryptionByDefault struct { } -func (*awsEc2query_serializeOpDisableImageDeprecation) ID() string { +func (*awsEc2query_serializeOpDisableEbsEncryptionByDefault) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableImageDeprecation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29367,7 +29717,7 @@ func (m *awsEc2query_serializeOpDisableImageDeprecation) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableImageDeprecationInput) + input, ok := in.Parameters.(*DisableEbsEncryptionByDefaultInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29392,10 +29742,10 @@ func (m *awsEc2query_serializeOpDisableImageDeprecation) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableImageDeprecation") + body.Key("Action").String("DisableEbsEncryptionByDefault") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableImageDeprecationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29418,14 +29768,14 @@ func (m *awsEc2query_serializeOpDisableImageDeprecation) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableImageDeregistrationProtection struct { +type awsEc2query_serializeOpDisableFastLaunch struct { } -func 
(*awsEc2query_serializeOpDisableImageDeregistrationProtection) ID() string { +func (*awsEc2query_serializeOpDisableFastLaunch) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableImageDeregistrationProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableFastLaunch) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29437,7 +29787,7 @@ func (m *awsEc2query_serializeOpDisableImageDeregistrationProtection) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableImageDeregistrationProtectionInput) + input, ok := in.Parameters.(*DisableFastLaunchInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29462,10 +29812,10 @@ func (m *awsEc2query_serializeOpDisableImageDeregistrationProtection) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableImageDeregistrationProtection") + body.Key("Action").String("DisableFastLaunch") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableImageDeregistrationProtectionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableFastLaunchInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29488,14 +29838,14 @@ func (m *awsEc2query_serializeOpDisableImageDeregistrationProtection) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableIpamOrganizationAdminAccount struct { +type awsEc2query_serializeOpDisableFastSnapshotRestores struct { } -func (*awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) ID() string { +func (*awsEc2query_serializeOpDisableFastSnapshotRestores) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableFastSnapshotRestores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29507,7 +29857,7 @@ func (m *awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableIpamOrganizationAdminAccountInput) + input, ok := in.Parameters.(*DisableFastSnapshotRestoresInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29532,10 +29882,10 @@ func (m *awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableIpamOrganizationAdminAccount") + body.Key("Action").String("DisableFastSnapshotRestores") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableIpamOrganizationAdminAccountInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableFastSnapshotRestoresInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29558,14 +29908,14 @@ func (m *awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableRouteServerPropagation struct { +type awsEc2query_serializeOpDisableImage struct { } -func (*awsEc2query_serializeOpDisableRouteServerPropagation) ID() string { +func (*awsEc2query_serializeOpDisableImage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableRouteServerPropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29577,7 +29927,7 @@ func (m *awsEc2query_serializeOpDisableRouteServerPropagation) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableRouteServerPropagationInput) + input, ok := in.Parameters.(*DisableImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29602,10 +29952,10 @@ func (m *awsEc2query_serializeOpDisableRouteServerPropagation) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableRouteServerPropagation") + body.Key("Action").String("DisableImage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableRouteServerPropagationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableImageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29628,14 +29978,14 @@ func (m *awsEc2query_serializeOpDisableRouteServerPropagation) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableSerialConsoleAccess struct { +type awsEc2query_serializeOpDisableImageBlockPublicAccess struct { } -func (*awsEc2query_serializeOpDisableSerialConsoleAccess) ID() string { +func (*awsEc2query_serializeOpDisableImageBlockPublicAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableSerialConsoleAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableImageBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29647,7 +29997,7 @@ func (m *awsEc2query_serializeOpDisableSerialConsoleAccess) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableSerialConsoleAccessInput) + input, ok := 
in.Parameters.(*DisableImageBlockPublicAccessInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29672,10 +30022,10 @@ func (m *awsEc2query_serializeOpDisableSerialConsoleAccess) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableSerialConsoleAccess") + body.Key("Action").String("DisableImageBlockPublicAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableSerialConsoleAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableImageBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29698,14 +30048,14 @@ func (m *awsEc2query_serializeOpDisableSerialConsoleAccess) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableSnapshotBlockPublicAccess struct { +type awsEc2query_serializeOpDisableImageDeprecation struct { } -func (*awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) ID() string { +func (*awsEc2query_serializeOpDisableImageDeprecation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableImageDeprecation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29717,7 +30067,7 @@ func (m *awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableSnapshotBlockPublicAccessInput) + input, ok := in.Parameters.(*DisableImageDeprecationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29742,10 +30092,10 @@ func (m *awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableSnapshotBlockPublicAccess") + body.Key("Action").String("DisableImageDeprecation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableSnapshotBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableImageDeprecationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29768,14 +30118,14 @@ func (m *awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation struct { +type awsEc2query_serializeOpDisableImageDeregistrationProtection struct { } -func (*awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) ID() string { +func (*awsEc2query_serializeOpDisableImageDeregistrationProtection) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableImageDeregistrationProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29787,7 +30137,7 @@ func (m *awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableTransitGatewayRouteTablePropagationInput) + input, ok := in.Parameters.(*DisableImageDeregistrationProtectionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29812,10 +30162,10 @@ func (m *awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) Hand bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableTransitGatewayRouteTablePropagation") + body.Key("Action").String("DisableImageDeregistrationProtection") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableTransitGatewayRouteTablePropagationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableImageDeregistrationProtectionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29838,14 +30188,14 @@ func (m *awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) Hand return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableVgwRoutePropagation struct { +type awsEc2query_serializeOpDisableIpamOrganizationAdminAccount struct { } -func (*awsEc2query_serializeOpDisableVgwRoutePropagation) ID() string { +func (*awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableVgwRoutePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableIpamOrganizationAdminAccount) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29857,7 +30207,7 @@ func (m *awsEc2query_serializeOpDisableVgwRoutePropagation) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableVgwRoutePropagationInput) + input, ok := in.Parameters.(*DisableIpamOrganizationAdminAccountInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29882,10 +30232,10 @@ func (m *awsEc2query_serializeOpDisableVgwRoutePropagation) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableVgwRoutePropagation") + body.Key("Action").String("DisableIpamOrganizationAdminAccount") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableVgwRoutePropagationInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDisableIpamOrganizationAdminAccountInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29908,14 +30258,14 @@ func (m *awsEc2query_serializeOpDisableVgwRoutePropagation) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableVpcClassicLink struct { +type awsEc2query_serializeOpDisableRouteServerPropagation struct { } -func (*awsEc2query_serializeOpDisableVpcClassicLink) ID() string { +func (*awsEc2query_serializeOpDisableRouteServerPropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableRouteServerPropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29927,7 +30277,7 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLink) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableVpcClassicLinkInput) + input, ok := in.Parameters.(*DisableRouteServerPropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -29952,10 +30302,10 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLink) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableVpcClassicLink") + body.Key("Action").String("DisableRouteServerPropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableRouteServerPropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -29978,14 +30328,14 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLink) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport struct { +type awsEc2query_serializeOpDisableSerialConsoleAccess struct { } -func (*awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_serializeOpDisableSerialConsoleAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableSerialConsoleAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -29997,7 +30347,7 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisableVpcClassicLinkDnsSupportInput) + input, ok := in.Parameters.(*DisableSerialConsoleAccessInput) _ = input if !ok { return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30022,10 +30372,10 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisableVpcClassicLinkDnsSupport") + body.Key("Action").String("DisableSerialConsoleAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisableVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableSerialConsoleAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30048,14 +30398,14 @@ func (m *awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateAddress struct { +type awsEc2query_serializeOpDisableSnapshotBlockPublicAccess struct { } -func (*awsEc2query_serializeOpDisassociateAddress) ID() string { +func (*awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableSnapshotBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30067,7 +30417,7 @@ func (m *awsEc2query_serializeOpDisassociateAddress) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateAddressInput) + input, ok := in.Parameters.(*DisableSnapshotBlockPublicAccessInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30092,10 +30442,10 @@ func (m *awsEc2query_serializeOpDisassociateAddress) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateAddress") + body.Key("Action").String("DisableSnapshotBlockPublicAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateAddressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableSnapshotBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30118,14 +30468,14 @@ func (m *awsEc2query_serializeOpDisassociateAddress) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner struct { +type awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation struct { } -func (*awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) ID() string { +func (*awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpDisableTransitGatewayRouteTablePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30137,7 +30487,7 @@ func (m *awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateCapacityReservationBillingOwnerInput) + input, ok := in.Parameters.(*DisableTransitGatewayRouteTablePropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30162,10 +30512,10 @@ func (m *awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateCapacityReservationBillingOwner") + body.Key("Action").String("DisableTransitGatewayRouteTablePropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateCapacityReservationBillingOwnerInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableTransitGatewayRouteTablePropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30188,14 +30538,14 @@ func (m *awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateClientVpnTargetNetwork struct { +type awsEc2query_serializeOpDisableVgwRoutePropagation struct { } -func (*awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) ID() string { +func (*awsEc2query_serializeOpDisableVgwRoutePropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableVgwRoutePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30207,7 +30557,7 @@ func (m *awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateClientVpnTargetNetworkInput) + input, ok := in.Parameters.(*DisableVgwRoutePropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30232,10 +30582,10 @@ func (m *awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateClientVpnTargetNetwork") + body.Key("Action").String("DisableVgwRoutePropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateClientVpnTargetNetworkInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentDisableVgwRoutePropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30258,14 +30608,14 @@ func (m *awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole struct { +type awsEc2query_serializeOpDisableVpcClassicLink struct { } -func (*awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) ID() string { +func (*awsEc2query_serializeOpDisableVpcClassicLink) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30277,7 +30627,7 @@ func (m *awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateEnclaveCertificateIamRoleInput) + input, ok := in.Parameters.(*DisableVpcClassicLinkInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30302,10 +30652,10 @@ func (m *awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateEnclaveCertificateIamRole") + body.Key("Action").String("DisableVpcClassicLink") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateEnclaveCertificateIamRoleInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30328,14 +30678,14 @@ func (m *awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateIamInstanceProfile struct { +type awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_serializeOpDisassociateIamInstanceProfile) ID() string { +func (*awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateIamInstanceProfile) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisableVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30347,7 +30697,7 @@ func (m *awsEc2query_serializeOpDisassociateIamInstanceProfile) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateIamInstanceProfileInput) + input, ok := in.Parameters.(*DisableVpcClassicLinkDnsSupportInput) _ 
= input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30372,10 +30722,10 @@ func (m *awsEc2query_serializeOpDisassociateIamInstanceProfile) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateIamInstanceProfile") + body.Key("Action").String("DisableVpcClassicLinkDnsSupport") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateIamInstanceProfileInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisableVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30398,14 +30748,14 @@ func (m *awsEc2query_serializeOpDisassociateIamInstanceProfile) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateInstanceEventWindow struct { +type awsEc2query_serializeOpDisassociateAddress struct { } -func (*awsEc2query_serializeOpDisassociateInstanceEventWindow) ID() string { +func (*awsEc2query_serializeOpDisassociateAddress) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30417,7 +30767,7 @@ func (m *awsEc2query_serializeOpDisassociateInstanceEventWindow) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateInstanceEventWindowInput) + input, ok := in.Parameters.(*DisassociateAddressInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30442,10 +30792,10 @@ func (m *awsEc2query_serializeOpDisassociateInstanceEventWindow) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateInstanceEventWindow") + body.Key("Action").String("DisassociateAddress") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateAddressInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30468,14 +30818,14 @@ func (m *awsEc2query_serializeOpDisassociateInstanceEventWindow) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateIpamByoasn struct { +type awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner struct { } -func (*awsEc2query_serializeOpDisassociateIpamByoasn) ID() string { +func (*awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateIpamByoasn) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpDisassociateCapacityReservationBillingOwner) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30487,7 +30837,7 @@ func (m *awsEc2query_serializeOpDisassociateIpamByoasn) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateIpamByoasnInput) + input, ok := in.Parameters.(*DisassociateCapacityReservationBillingOwnerInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30512,10 +30862,10 @@ func (m *awsEc2query_serializeOpDisassociateIpamByoasn) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateIpamByoasn") + body.Key("Action").String("DisassociateCapacityReservationBillingOwner") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateIpamByoasnInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateCapacityReservationBillingOwnerInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30538,14 +30888,14 @@ func (m *awsEc2query_serializeOpDisassociateIpamByoasn) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateIpamResourceDiscovery struct { +type awsEc2query_serializeOpDisassociateClientVpnTargetNetwork struct { } -func (*awsEc2query_serializeOpDisassociateIpamResourceDiscovery) ID() string { +func (*awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateIpamResourceDiscovery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateClientVpnTargetNetwork) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30557,7 +30907,7 @@ func (m *awsEc2query_serializeOpDisassociateIpamResourceDiscovery) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateIpamResourceDiscoveryInput) + input, ok := in.Parameters.(*DisassociateClientVpnTargetNetworkInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30582,10 +30932,10 @@ func (m *awsEc2query_serializeOpDisassociateIpamResourceDiscovery) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateIpamResourceDiscovery") + body.Key("Action").String("DisassociateClientVpnTargetNetwork") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateIpamResourceDiscoveryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateClientVpnTargetNetworkInput(input, 
bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30608,14 +30958,14 @@ func (m *awsEc2query_serializeOpDisassociateIpamResourceDiscovery) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateNatGatewayAddress struct { +type awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole struct { } -func (*awsEc2query_serializeOpDisassociateNatGatewayAddress) ID() string { +func (*awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateNatGatewayAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateEnclaveCertificateIamRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30627,7 +30977,7 @@ func (m *awsEc2query_serializeOpDisassociateNatGatewayAddress) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateNatGatewayAddressInput) + input, ok := in.Parameters.(*DisassociateEnclaveCertificateIamRoleInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30652,10 +31002,10 @@ func (m *awsEc2query_serializeOpDisassociateNatGatewayAddress) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateNatGatewayAddress") + body.Key("Action").String("DisassociateEnclaveCertificateIamRole") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateNatGatewayAddressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateEnclaveCertificateIamRoleInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30678,14 +31028,14 @@ func (m *awsEc2query_serializeOpDisassociateNatGatewayAddress) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateRouteServer struct { +type awsEc2query_serializeOpDisassociateIamInstanceProfile struct { } -func (*awsEc2query_serializeOpDisassociateRouteServer) ID() string { +func (*awsEc2query_serializeOpDisassociateIamInstanceProfile) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateRouteServer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateIamInstanceProfile) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30697,7 +31047,7 @@ func (m *awsEc2query_serializeOpDisassociateRouteServer) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateRouteServerInput) + input, ok := in.Parameters.(*DisassociateIamInstanceProfileInput) _ = input if !ok { return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30722,10 +31072,10 @@ func (m *awsEc2query_serializeOpDisassociateRouteServer) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateRouteServer") + body.Key("Action").String("DisassociateIamInstanceProfile") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateRouteServerInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateIamInstanceProfileInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30748,14 +31098,14 @@ func (m *awsEc2query_serializeOpDisassociateRouteServer) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateRouteTable struct { +type awsEc2query_serializeOpDisassociateInstanceEventWindow struct { } -func (*awsEc2query_serializeOpDisassociateRouteTable) ID() string { +func (*awsEc2query_serializeOpDisassociateInstanceEventWindow) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateRouteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30767,7 +31117,7 @@ func (m *awsEc2query_serializeOpDisassociateRouteTable) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateRouteTableInput) + input, ok := in.Parameters.(*DisassociateInstanceEventWindowInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30792,10 +31142,10 @@ func (m *awsEc2query_serializeOpDisassociateRouteTable) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateRouteTable") + body.Key("Action").String("DisassociateInstanceEventWindow") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateRouteTableInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30818,14 +31168,14 @@ func (m *awsEc2query_serializeOpDisassociateRouteTable) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateSecurityGroupVpc struct { +type awsEc2query_serializeOpDisassociateIpamByoasn struct { } -func (*awsEc2query_serializeOpDisassociateSecurityGroupVpc) ID() string { +func (*awsEc2query_serializeOpDisassociateIpamByoasn) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateSecurityGroupVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateIpamByoasn) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30837,7 +31187,7 @@ func (m *awsEc2query_serializeOpDisassociateSecurityGroupVpc) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateSecurityGroupVpcInput) + input, ok := in.Parameters.(*DisassociateIpamByoasnInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30862,10 +31212,10 @@ func (m *awsEc2query_serializeOpDisassociateSecurityGroupVpc) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateSecurityGroupVpc") + body.Key("Action").String("DisassociateIpamByoasn") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateSecurityGroupVpcInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateIpamByoasnInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30888,14 +31238,14 @@ func (m *awsEc2query_serializeOpDisassociateSecurityGroupVpc) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateSubnetCidrBlock struct { +type awsEc2query_serializeOpDisassociateIpamResourceDiscovery struct { } -func (*awsEc2query_serializeOpDisassociateSubnetCidrBlock) ID() string { +func (*awsEc2query_serializeOpDisassociateIpamResourceDiscovery) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateSubnetCidrBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateIpamResourceDiscovery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30907,7 +31257,7 @@ func (m *awsEc2query_serializeOpDisassociateSubnetCidrBlock) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateSubnetCidrBlockInput) + input, ok := in.Parameters.(*DisassociateIpamResourceDiscoveryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -30932,10 +31282,10 @@ func (m *awsEc2query_serializeOpDisassociateSubnetCidrBlock) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateSubnetCidrBlock") + body.Key("Action").String("DisassociateIpamResourceDiscovery") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateSubnetCidrBlockInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateIpamResourceDiscoveryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -30958,14 +31308,14 @@ func (m *awsEc2query_serializeOpDisassociateSubnetCidrBlock) HandleSerialize(ctx return 
next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain struct { +type awsEc2query_serializeOpDisassociateNatGatewayAddress struct { } -func (*awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) ID() string { +func (*awsEc2query_serializeOpDisassociateNatGatewayAddress) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateNatGatewayAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -30977,7 +31327,7 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) Handl return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateTransitGatewayMulticastDomainInput) + input, ok := in.Parameters.(*DisassociateNatGatewayAddressInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31002,10 +31352,10 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) Handl bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateTransitGatewayMulticastDomain") + body.Key("Action").String("DisassociateNatGatewayAddress") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayMulticastDomainInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateNatGatewayAddressInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31028,14 +31378,14 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) Handl return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable struct { +type awsEc2query_serializeOpDisassociateRouteServer struct { } -func (*awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) ID() string { +func (*awsEc2query_serializeOpDisassociateRouteServer) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateRouteServer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31047,7 +31397,7 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateTransitGatewayPolicyTableInput) + input, ok := in.Parameters.(*DisassociateRouteServerInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31072,10 +31422,10 @@ func (m 
*awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateTransitGatewayPolicyTable") + body.Key("Action").String("DisassociateRouteServer") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayPolicyTableInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateRouteServerInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31098,14 +31448,14 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateTransitGatewayRouteTable struct { +type awsEc2query_serializeOpDisassociateRouteTable struct { } -func (*awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) ID() string { +func (*awsEc2query_serializeOpDisassociateRouteTable) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateRouteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31117,7 +31467,7 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateTransitGatewayRouteTableInput) + input, ok := in.Parameters.(*DisassociateRouteTableInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31142,10 +31492,10 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateTransitGatewayRouteTable") + body.Key("Action").String("DisassociateRouteTable") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayRouteTableInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateRouteTableInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31168,14 +31518,14 @@ func (m *awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateTrunkInterface struct { +type awsEc2query_serializeOpDisassociateSecurityGroupVpc struct { } -func (*awsEc2query_serializeOpDisassociateTrunkInterface) ID() string { +func (*awsEc2query_serializeOpDisassociateSecurityGroupVpc) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateTrunkInterface) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateSecurityGroupVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31187,7 +31537,7 @@ func (m *awsEc2query_serializeOpDisassociateTrunkInterface) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateTrunkInterfaceInput) + input, ok := in.Parameters.(*DisassociateSecurityGroupVpcInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31212,10 +31562,10 @@ func (m *awsEc2query_serializeOpDisassociateTrunkInterface) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateTrunkInterface") + body.Key("Action").String("DisassociateSecurityGroupVpc") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateTrunkInterfaceInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateSecurityGroupVpcInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31238,14 +31588,14 @@ func (m *awsEc2query_serializeOpDisassociateTrunkInterface) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpDisassociateVpcCidrBlock struct { +type awsEc2query_serializeOpDisassociateSubnetCidrBlock struct { } -func (*awsEc2query_serializeOpDisassociateVpcCidrBlock) ID() string { +func (*awsEc2query_serializeOpDisassociateSubnetCidrBlock) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpDisassociateVpcCidrBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateSubnetCidrBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31257,7 +31607,7 @@ func (m *awsEc2query_serializeOpDisassociateVpcCidrBlock) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*DisassociateVpcCidrBlockInput) + input, ok := in.Parameters.(*DisassociateSubnetCidrBlockInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31282,10 +31632,10 @@ func (m *awsEc2query_serializeOpDisassociateVpcCidrBlock) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("DisassociateVpcCidrBlock") + body.Key("Action").String("DisassociateSubnetCidrBlock") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentDisassociateVpcCidrBlockInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateSubnetCidrBlockInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31308,14 +31658,14 @@ func (m *awsEc2query_serializeOpDisassociateVpcCidrBlock) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableAddressTransfer struct { +type 
awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain struct { } -func (*awsEc2query_serializeOpEnableAddressTransfer) ID() string { +func (*awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableAddressTransfer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateTransitGatewayMulticastDomain) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31327,7 +31677,7 @@ func (m *awsEc2query_serializeOpEnableAddressTransfer) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableAddressTransferInput) + input, ok := in.Parameters.(*DisassociateTransitGatewayMulticastDomainInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31352,10 +31702,10 @@ func (m *awsEc2query_serializeOpEnableAddressTransfer) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableAddressTransfer") + body.Key("Action").String("DisassociateTransitGatewayMulticastDomain") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableAddressTransferInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayMulticastDomainInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31378,14 +31728,14 @@ func (m *awsEc2query_serializeOpEnableAddressTransfer) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableAllowedImagesSettings struct { +type awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable struct { } -func (*awsEc2query_serializeOpEnableAllowedImagesSettings) ID() string { +func (*awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateTransitGatewayPolicyTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31397,7 +31747,7 @@ func (m *awsEc2query_serializeOpEnableAllowedImagesSettings) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableAllowedImagesSettingsInput) + input, ok := in.Parameters.(*DisassociateTransitGatewayPolicyTableInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31422,10 +31772,10 @@ func (m *awsEc2query_serializeOpEnableAllowedImagesSettings) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := 
bodyEncoder.Object() - body.Key("Action").String("EnableAllowedImagesSettings") + body.Key("Action").String("DisassociateTransitGatewayPolicyTable") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayPolicyTableInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31448,14 +31798,14 @@ func (m *awsEc2query_serializeOpEnableAllowedImagesSettings) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription struct { +type awsEc2query_serializeOpDisassociateTransitGatewayRouteTable struct { } -func (*awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) ID() string { +func (*awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateTransitGatewayRouteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31467,7 +31817,7 @@ func (m *awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) H return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableAwsNetworkPerformanceMetricSubscriptionInput) + input, ok := in.Parameters.(*DisassociateTransitGatewayRouteTableInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31492,10 +31842,10 @@ func (m *awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) H bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableAwsNetworkPerformanceMetricSubscription") + body.Key("Action").String("DisassociateTransitGatewayRouteTable") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableAwsNetworkPerformanceMetricSubscriptionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateTransitGatewayRouteTableInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31518,14 +31868,14 @@ func (m *awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) H return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableEbsEncryptionByDefault struct { +type awsEc2query_serializeOpDisassociateTrunkInterface struct { } -func (*awsEc2query_serializeOpEnableEbsEncryptionByDefault) ID() string { +func (*awsEc2query_serializeOpDisassociateTrunkInterface) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateTrunkInterface) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31537,7 +31887,7 @@ func (m *awsEc2query_serializeOpEnableEbsEncryptionByDefault) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableEbsEncryptionByDefaultInput) + input, ok := in.Parameters.(*DisassociateTrunkInterfaceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31562,10 +31912,10 @@ func (m *awsEc2query_serializeOpEnableEbsEncryptionByDefault) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableEbsEncryptionByDefault") + body.Key("Action").String("DisassociateTrunkInterface") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateTrunkInterfaceInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31588,14 +31938,14 @@ func (m *awsEc2query_serializeOpEnableEbsEncryptionByDefault) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableFastLaunch struct { +type awsEc2query_serializeOpDisassociateVpcCidrBlock struct { } -func (*awsEc2query_serializeOpEnableFastLaunch) ID() string { +func (*awsEc2query_serializeOpDisassociateVpcCidrBlock) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableFastLaunch) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpDisassociateVpcCidrBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31607,7 +31957,7 @@ func (m *awsEc2query_serializeOpEnableFastLaunch) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableFastLaunchInput) + input, ok := in.Parameters.(*DisassociateVpcCidrBlockInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31632,10 +31982,10 @@ func (m *awsEc2query_serializeOpEnableFastLaunch) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableFastLaunch") + body.Key("Action").String("DisassociateVpcCidrBlock") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableFastLaunchInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentDisassociateVpcCidrBlockInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31658,14 +32008,14 @@ func (m *awsEc2query_serializeOpEnableFastLaunch) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableFastSnapshotRestores struct { +type awsEc2query_serializeOpEnableAddressTransfer struct { } -func 
(*awsEc2query_serializeOpEnableFastSnapshotRestores) ID() string { +func (*awsEc2query_serializeOpEnableAddressTransfer) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableFastSnapshotRestores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableAddressTransfer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31677,7 +32027,7 @@ func (m *awsEc2query_serializeOpEnableFastSnapshotRestores) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableFastSnapshotRestoresInput) + input, ok := in.Parameters.(*EnableAddressTransferInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31702,10 +32052,10 @@ func (m *awsEc2query_serializeOpEnableFastSnapshotRestores) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableFastSnapshotRestores") + body.Key("Action").String("EnableAddressTransfer") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableFastSnapshotRestoresInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableAddressTransferInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31728,14 +32078,14 @@ func (m *awsEc2query_serializeOpEnableFastSnapshotRestores) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableImage struct { +type awsEc2query_serializeOpEnableAllowedImagesSettings struct { } -func (*awsEc2query_serializeOpEnableImage) ID() string { +func (*awsEc2query_serializeOpEnableAllowedImagesSettings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31747,7 +32097,7 @@ func (m *awsEc2query_serializeOpEnableImage) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableImageInput) + input, ok := in.Parameters.(*EnableAllowedImagesSettingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31772,10 +32122,10 @@ func (m *awsEc2query_serializeOpEnableImage) HandleSerialize(ctx context.Context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableImage") + body.Key("Action").String("EnableAllowedImagesSettings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableImageInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentEnableAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31798,14 +32148,14 @@ func (m *awsEc2query_serializeOpEnableImage) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableImageBlockPublicAccess struct { +type awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription struct { } -func (*awsEc2query_serializeOpEnableImageBlockPublicAccess) ID() string { +func (*awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableImageBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableAwsNetworkPerformanceMetricSubscription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31817,7 +32167,7 @@ func (m *awsEc2query_serializeOpEnableImageBlockPublicAccess) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableImageBlockPublicAccessInput) + input, ok := in.Parameters.(*EnableAwsNetworkPerformanceMetricSubscriptionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31842,10 +32192,10 @@ func (m *awsEc2query_serializeOpEnableImageBlockPublicAccess) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableImageBlockPublicAccess") + body.Key("Action").String("EnableAwsNetworkPerformanceMetricSubscription") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableImageBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableAwsNetworkPerformanceMetricSubscriptionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31868,14 +32218,14 @@ func (m *awsEc2query_serializeOpEnableImageBlockPublicAccess) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableImageDeprecation struct { +type awsEc2query_serializeOpEnableEbsEncryptionByDefault struct { } -func (*awsEc2query_serializeOpEnableImageDeprecation) ID() string { +func (*awsEc2query_serializeOpEnableEbsEncryptionByDefault) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableImageDeprecation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31887,7 +32237,7 @@ func (m *awsEc2query_serializeOpEnableImageDeprecation) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableImageDeprecationInput) + input, ok := 
in.Parameters.(*EnableEbsEncryptionByDefaultInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31912,10 +32262,10 @@ func (m *awsEc2query_serializeOpEnableImageDeprecation) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableImageDeprecation") + body.Key("Action").String("EnableEbsEncryptionByDefault") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableImageDeprecationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -31938,14 +32288,14 @@ func (m *awsEc2query_serializeOpEnableImageDeprecation) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableImageDeregistrationProtection struct { +type awsEc2query_serializeOpEnableFastLaunch struct { } -func (*awsEc2query_serializeOpEnableImageDeregistrationProtection) ID() string { +func (*awsEc2query_serializeOpEnableFastLaunch) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableImageDeregistrationProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableFastLaunch) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -31957,7 +32307,7 @@ func (m *awsEc2query_serializeOpEnableImageDeregistrationProtection) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableImageDeregistrationProtectionInput) + input, ok := in.Parameters.(*EnableFastLaunchInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -31982,10 +32332,10 @@ func (m *awsEc2query_serializeOpEnableImageDeregistrationProtection) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableImageDeregistrationProtection") + body.Key("Action").String("EnableFastLaunch") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableImageDeregistrationProtectionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableFastLaunchInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32008,14 +32358,14 @@ func (m *awsEc2query_serializeOpEnableImageDeregistrationProtection) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableIpamOrganizationAdminAccount struct { +type awsEc2query_serializeOpEnableFastSnapshotRestores struct { } -func (*awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) ID() string { +func (*awsEc2query_serializeOpEnableFastSnapshotRestores) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpEnableFastSnapshotRestores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32027,7 +32377,7 @@ func (m *awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableIpamOrganizationAdminAccountInput) + input, ok := in.Parameters.(*EnableFastSnapshotRestoresInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32052,10 +32402,10 @@ func (m *awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableIpamOrganizationAdminAccount") + body.Key("Action").String("EnableFastSnapshotRestores") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableIpamOrganizationAdminAccountInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableFastSnapshotRestoresInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32078,14 +32428,14 @@ func (m *awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing struct { +type awsEc2query_serializeOpEnableImage struct { } -func (*awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) ID() string { +func (*awsEc2query_serializeOpEnableImage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32097,7 +32447,7 @@ func (m *awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) H return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableReachabilityAnalyzerOrganizationSharingInput) + input, ok := in.Parameters.(*EnableImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32122,10 +32472,10 @@ func (m *awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) H bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableReachabilityAnalyzerOrganizationSharing") + body.Key("Action").String("EnableImage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableReachabilityAnalyzerOrganizationSharingInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableImageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32148,14 
+32498,14 @@ func (m *awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) H return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableRouteServerPropagation struct { +type awsEc2query_serializeOpEnableImageBlockPublicAccess struct { } -func (*awsEc2query_serializeOpEnableRouteServerPropagation) ID() string { +func (*awsEc2query_serializeOpEnableImageBlockPublicAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableRouteServerPropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableImageBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32167,7 +32517,7 @@ func (m *awsEc2query_serializeOpEnableRouteServerPropagation) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableRouteServerPropagationInput) + input, ok := in.Parameters.(*EnableImageBlockPublicAccessInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32192,10 +32542,10 @@ func (m *awsEc2query_serializeOpEnableRouteServerPropagation) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableRouteServerPropagation") + body.Key("Action").String("EnableImageBlockPublicAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableRouteServerPropagationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableImageBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32218,14 +32568,14 @@ func (m *awsEc2query_serializeOpEnableRouteServerPropagation) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableSerialConsoleAccess struct { +type awsEc2query_serializeOpEnableImageDeprecation struct { } -func (*awsEc2query_serializeOpEnableSerialConsoleAccess) ID() string { +func (*awsEc2query_serializeOpEnableImageDeprecation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableSerialConsoleAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableImageDeprecation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32237,7 +32587,7 @@ func (m *awsEc2query_serializeOpEnableSerialConsoleAccess) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableSerialConsoleAccessInput) + input, ok := in.Parameters.(*EnableImageDeprecationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32262,10 +32612,10 @@ func (m *awsEc2query_serializeOpEnableSerialConsoleAccess) HandleSerialize(ctx c 
bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableSerialConsoleAccess") + body.Key("Action").String("EnableImageDeprecation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableSerialConsoleAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableImageDeprecationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32288,14 +32638,14 @@ func (m *awsEc2query_serializeOpEnableSerialConsoleAccess) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableSnapshotBlockPublicAccess struct { +type awsEc2query_serializeOpEnableImageDeregistrationProtection struct { } -func (*awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) ID() string { +func (*awsEc2query_serializeOpEnableImageDeregistrationProtection) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableImageDeregistrationProtection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32307,7 +32657,7 @@ func (m *awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableSnapshotBlockPublicAccessInput) + input, ok := in.Parameters.(*EnableImageDeregistrationProtectionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32332,10 +32682,10 @@ func (m *awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableSnapshotBlockPublicAccess") + body.Key("Action").String("EnableImageDeregistrationProtection") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableSnapshotBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableImageDeregistrationProtectionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32358,14 +32708,14 @@ func (m *awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation struct { +type awsEc2query_serializeOpEnableIpamOrganizationAdminAccount struct { } -func (*awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) ID() string { +func (*awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableIpamOrganizationAdminAccount) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32377,7 +32727,7 @@ func (m *awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) Handl return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableTransitGatewayRouteTablePropagationInput) + input, ok := in.Parameters.(*EnableIpamOrganizationAdminAccountInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32402,10 +32752,10 @@ func (m *awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) Handl bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableTransitGatewayRouteTablePropagation") + body.Key("Action").String("EnableIpamOrganizationAdminAccount") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableTransitGatewayRouteTablePropagationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableIpamOrganizationAdminAccountInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32428,14 +32778,14 @@ func (m *awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) Handl return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableVgwRoutePropagation struct { +type awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing struct { } -func (*awsEc2query_serializeOpEnableVgwRoutePropagation) ID() string { +func (*awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableVgwRoutePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableReachabilityAnalyzerOrganizationSharing) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32447,7 +32797,7 @@ func (m *awsEc2query_serializeOpEnableVgwRoutePropagation) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableVgwRoutePropagationInput) + input, ok := in.Parameters.(*EnableReachabilityAnalyzerOrganizationSharingInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32472,10 +32822,10 @@ func (m *awsEc2query_serializeOpEnableVgwRoutePropagation) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableVgwRoutePropagation") + body.Key("Action").String("EnableReachabilityAnalyzerOrganizationSharing") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableVgwRoutePropagationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableReachabilityAnalyzerOrganizationSharingInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32498,14 +32848,14 @@ func (m 
*awsEc2query_serializeOpEnableVgwRoutePropagation) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableVolumeIO struct { +type awsEc2query_serializeOpEnableRouteServerPropagation struct { } -func (*awsEc2query_serializeOpEnableVolumeIO) ID() string { +func (*awsEc2query_serializeOpEnableRouteServerPropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableVolumeIO) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableRouteServerPropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32517,7 +32867,7 @@ func (m *awsEc2query_serializeOpEnableVolumeIO) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableVolumeIOInput) + input, ok := in.Parameters.(*EnableRouteServerPropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32542,10 +32892,10 @@ func (m *awsEc2query_serializeOpEnableVolumeIO) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableVolumeIO") + body.Key("Action").String("EnableRouteServerPropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableVolumeIOInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableRouteServerPropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32568,14 +32918,14 @@ func (m *awsEc2query_serializeOpEnableVolumeIO) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableVpcClassicLink struct { +type awsEc2query_serializeOpEnableSerialConsoleAccess struct { } -func (*awsEc2query_serializeOpEnableVpcClassicLink) ID() string { +func (*awsEc2query_serializeOpEnableSerialConsoleAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableSerialConsoleAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32587,7 +32937,7 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLink) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableVpcClassicLinkInput) + input, ok := in.Parameters.(*EnableSerialConsoleAccessInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32612,10 +32962,10 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLink) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("EnableVpcClassicLink") + body.Key("Action").String("EnableSerialConsoleAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableSerialConsoleAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32638,14 +32988,14 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLink) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport struct { +type awsEc2query_serializeOpEnableSnapshotBlockPublicAccess struct { } -func (*awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) ID() string { +func (*awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableSnapshotBlockPublicAccess) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32657,7 +33007,7 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*EnableVpcClassicLinkDnsSupportInput) + input, ok := in.Parameters.(*EnableSnapshotBlockPublicAccessInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32682,10 +33032,10 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("EnableVpcClassicLinkDnsSupport") + body.Key("Action").String("EnableSnapshotBlockPublicAccess") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentEnableVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableSnapshotBlockPublicAccessInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32708,14 +33058,14 @@ func (m *awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList struct { +type awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation struct { } -func (*awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) ID() string { +func (*awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableTransitGatewayRouteTablePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, 
"OperationSerializer") @@ -32727,7 +33077,7 @@ func (m *awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ExportClientVpnClientCertificateRevocationListInput) + input, ok := in.Parameters.(*EnableTransitGatewayRouteTablePropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32752,10 +33102,10 @@ func (m *awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ExportClientVpnClientCertificateRevocationList") + body.Key("Action").String("EnableTransitGatewayRouteTablePropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentExportClientVpnClientCertificateRevocationListInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableTransitGatewayRouteTablePropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32778,14 +33128,14 @@ func (m *awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpExportClientVpnClientConfiguration struct { +type awsEc2query_serializeOpEnableVgwRoutePropagation struct { } -func (*awsEc2query_serializeOpExportClientVpnClientConfiguration) ID() string { +func (*awsEc2query_serializeOpEnableVgwRoutePropagation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpExportClientVpnClientConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableVgwRoutePropagation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32797,7 +33147,7 @@ func (m *awsEc2query_serializeOpExportClientVpnClientConfiguration) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ExportClientVpnClientConfigurationInput) + input, ok := in.Parameters.(*EnableVgwRoutePropagationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32822,10 +33172,10 @@ func (m *awsEc2query_serializeOpExportClientVpnClientConfiguration) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ExportClientVpnClientConfiguration") + body.Key("Action").String("EnableVgwRoutePropagation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentExportClientVpnClientConfigurationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableVgwRoutePropagationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32848,14 +33198,14 @@ func (m *awsEc2query_serializeOpExportClientVpnClientConfiguration) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpExportImage 
struct { +type awsEc2query_serializeOpEnableVolumeIO struct { } -func (*awsEc2query_serializeOpExportImage) ID() string { +func (*awsEc2query_serializeOpEnableVolumeIO) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpExportImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableVolumeIO) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32867,7 +33217,7 @@ func (m *awsEc2query_serializeOpExportImage) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ExportImageInput) + input, ok := in.Parameters.(*EnableVolumeIOInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32892,10 +33242,10 @@ func (m *awsEc2query_serializeOpExportImage) HandleSerialize(ctx context.Context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ExportImage") + body.Key("Action").String("EnableVolumeIO") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentExportImageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableVolumeIOInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32918,14 +33268,14 @@ func (m *awsEc2query_serializeOpExportImage) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpExportTransitGatewayRoutes struct { +type awsEc2query_serializeOpEnableVpcClassicLink struct { } -func (*awsEc2query_serializeOpExportTransitGatewayRoutes) ID() string { +func (*awsEc2query_serializeOpEnableVpcClassicLink) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpExportTransitGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableVpcClassicLink) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -32937,7 +33287,7 @@ func (m *awsEc2query_serializeOpExportTransitGatewayRoutes) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ExportTransitGatewayRoutesInput) + input, ok := in.Parameters.(*EnableVpcClassicLinkInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -32962,10 +33312,10 @@ func (m *awsEc2query_serializeOpExportTransitGatewayRoutes) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ExportTransitGatewayRoutes") + body.Key("Action").String("EnableVpcClassicLink") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentExportTransitGatewayRoutesInput(input, bodyEncoder.Value); err != 
nil { + if err := awsEc2query_serializeOpDocumentEnableVpcClassicLinkInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -32988,14 +33338,14 @@ func (m *awsEc2query_serializeOpExportTransitGatewayRoutes) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration struct { +type awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport struct { } -func (*awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) ID() string { +func (*awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpEnableVpcClassicLinkDnsSupport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33007,7 +33357,7 @@ func (m *awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ExportVerifiedAccessInstanceClientConfigurationInput) + input, ok := in.Parameters.(*EnableVpcClassicLinkDnsSupportInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33032,10 +33382,10 @@ func (m *awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ExportVerifiedAccessInstanceClientConfiguration") + body.Key("Action").String("EnableVpcClassicLinkDnsSupport") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentExportVerifiedAccessInstanceClientConfigurationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentEnableVpcClassicLinkDnsSupportInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33058,14 +33408,14 @@ func (m *awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetAllowedImagesSettings struct { +type awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList struct { } -func (*awsEc2query_serializeOpGetAllowedImagesSettings) ID() string { +func (*awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpExportClientVpnClientCertificateRevocationList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33077,7 +33427,7 @@ func (m *awsEc2query_serializeOpGetAllowedImagesSettings) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} } - input, ok := in.Parameters.(*GetAllowedImagesSettingsInput) + input, ok := in.Parameters.(*ExportClientVpnClientCertificateRevocationListInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33102,10 +33452,10 @@ func (m *awsEc2query_serializeOpGetAllowedImagesSettings) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetAllowedImagesSettings") + body.Key("Action").String("ExportClientVpnClientCertificateRevocationList") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentExportClientVpnClientCertificateRevocationListInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33128,14 +33478,14 @@ func (m *awsEc2query_serializeOpGetAllowedImagesSettings) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles struct { +type awsEc2query_serializeOpExportClientVpnClientConfiguration struct { } -func (*awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) ID() string { +func (*awsEc2query_serializeOpExportClientVpnClientConfiguration) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpExportClientVpnClientConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33147,7 +33497,7 @@ func (m *awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetAssociatedEnclaveCertificateIamRolesInput) + input, ok := in.Parameters.(*ExportClientVpnClientConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33172,10 +33522,10 @@ func (m *awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetAssociatedEnclaveCertificateIamRoles") + body.Key("Action").String("ExportClientVpnClientConfiguration") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetAssociatedEnclaveCertificateIamRolesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentExportClientVpnClientConfigurationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33198,14 +33548,14 @@ func (m *awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs struct { +type awsEc2query_serializeOpExportImage struct { } -func (*awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) ID() string { +func 
(*awsEc2query_serializeOpExportImage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpExportImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33217,7 +33567,7 @@ func (m *awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetAssociatedIpv6PoolCidrsInput) + input, ok := in.Parameters.(*ExportImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33242,10 +33592,10 @@ func (m *awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetAssociatedIpv6PoolCidrs") + body.Key("Action").String("ExportImage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetAssociatedIpv6PoolCidrsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentExportImageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33268,14 +33618,14 @@ func (m *awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetAwsNetworkPerformanceData struct { +type awsEc2query_serializeOpExportTransitGatewayRoutes struct { } -func (*awsEc2query_serializeOpGetAwsNetworkPerformanceData) ID() string { +func (*awsEc2query_serializeOpExportTransitGatewayRoutes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetAwsNetworkPerformanceData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpExportTransitGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33287,7 +33637,7 @@ func (m *awsEc2query_serializeOpGetAwsNetworkPerformanceData) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetAwsNetworkPerformanceDataInput) + input, ok := in.Parameters.(*ExportTransitGatewayRoutesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33312,10 +33662,10 @@ func (m *awsEc2query_serializeOpGetAwsNetworkPerformanceData) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetAwsNetworkPerformanceData") + body.Key("Action").String("ExportTransitGatewayRoutes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetAwsNetworkPerformanceDataInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentExportTransitGatewayRoutesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33338,14 +33688,14 @@ func (m *awsEc2query_serializeOpGetAwsNetworkPerformanceData) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetCapacityReservationUsage struct { +type awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration struct { } -func (*awsEc2query_serializeOpGetCapacityReservationUsage) ID() string { +func (*awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetCapacityReservationUsage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpExportVerifiedAccessInstanceClientConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33357,7 +33707,7 @@ func (m *awsEc2query_serializeOpGetCapacityReservationUsage) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetCapacityReservationUsageInput) + input, ok := in.Parameters.(*ExportVerifiedAccessInstanceClientConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33382,10 +33732,10 @@ func (m *awsEc2query_serializeOpGetCapacityReservationUsage) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetCapacityReservationUsage") + body.Key("Action").String("ExportVerifiedAccessInstanceClientConfiguration") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetCapacityReservationUsageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentExportVerifiedAccessInstanceClientConfigurationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33408,14 +33758,14 @@ func (m *awsEc2query_serializeOpGetCapacityReservationUsage) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetCoipPoolUsage struct { +type awsEc2query_serializeOpGetActiveVpnTunnelStatus struct { } -func (*awsEc2query_serializeOpGetCoipPoolUsage) ID() string { +func (*awsEc2query_serializeOpGetActiveVpnTunnelStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetCoipPoolUsage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetActiveVpnTunnelStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33427,7 +33777,7 @@ func (m *awsEc2query_serializeOpGetCoipPoolUsage) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetCoipPoolUsageInput) + input, ok := 
in.Parameters.(*GetActiveVpnTunnelStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33452,10 +33802,10 @@ func (m *awsEc2query_serializeOpGetCoipPoolUsage) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetCoipPoolUsage") + body.Key("Action").String("GetActiveVpnTunnelStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetCoipPoolUsageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetActiveVpnTunnelStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33478,14 +33828,14 @@ func (m *awsEc2query_serializeOpGetCoipPoolUsage) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetConsoleOutput struct { +type awsEc2query_serializeOpGetAllowedImagesSettings struct { } -func (*awsEc2query_serializeOpGetConsoleOutput) ID() string { +func (*awsEc2query_serializeOpGetAllowedImagesSettings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetConsoleOutput) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33497,7 +33847,7 @@ func (m *awsEc2query_serializeOpGetConsoleOutput) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetConsoleOutputInput) + input, ok := in.Parameters.(*GetAllowedImagesSettingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33522,10 +33872,10 @@ func (m *awsEc2query_serializeOpGetConsoleOutput) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetConsoleOutput") + body.Key("Action").String("GetAllowedImagesSettings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetConsoleOutputInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33548,14 +33898,14 @@ func (m *awsEc2query_serializeOpGetConsoleOutput) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetConsoleScreenshot struct { +type awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles struct { } -func (*awsEc2query_serializeOpGetConsoleScreenshot) ID() string { +func (*awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetConsoleScreenshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetAssociatedEnclaveCertificateIamRoles) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33567,7 +33917,7 @@ func (m *awsEc2query_serializeOpGetConsoleScreenshot) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetConsoleScreenshotInput) + input, ok := in.Parameters.(*GetAssociatedEnclaveCertificateIamRolesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33592,10 +33942,10 @@ func (m *awsEc2query_serializeOpGetConsoleScreenshot) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetConsoleScreenshot") + body.Key("Action").String("GetAssociatedEnclaveCertificateIamRoles") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetConsoleScreenshotInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetAssociatedEnclaveCertificateIamRolesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33618,14 +33968,14 @@ func (m *awsEc2query_serializeOpGetConsoleScreenshot) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetDeclarativePoliciesReportSummary struct { +type awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs struct { } -func (*awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) ID() string { +func (*awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetAssociatedIpv6PoolCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33637,7 +33987,7 @@ func (m *awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetDeclarativePoliciesReportSummaryInput) + input, ok := in.Parameters.(*GetAssociatedIpv6PoolCidrsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33662,10 +34012,10 @@ func (m *awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetDeclarativePoliciesReportSummary") + body.Key("Action").String("GetAssociatedIpv6PoolCidrs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetDeclarativePoliciesReportSummaryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetAssociatedIpv6PoolCidrsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33688,14 +34038,14 @@ func (m 
*awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetDefaultCreditSpecification struct { +type awsEc2query_serializeOpGetAwsNetworkPerformanceData struct { } -func (*awsEc2query_serializeOpGetDefaultCreditSpecification) ID() string { +func (*awsEc2query_serializeOpGetAwsNetworkPerformanceData) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetDefaultCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetAwsNetworkPerformanceData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33707,7 +34057,7 @@ func (m *awsEc2query_serializeOpGetDefaultCreditSpecification) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetDefaultCreditSpecificationInput) + input, ok := in.Parameters.(*GetAwsNetworkPerformanceDataInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33732,10 +34082,10 @@ func (m *awsEc2query_serializeOpGetDefaultCreditSpecification) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetDefaultCreditSpecification") + body.Key("Action").String("GetAwsNetworkPerformanceData") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetDefaultCreditSpecificationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetAwsNetworkPerformanceDataInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33758,14 +34108,14 @@ func (m *awsEc2query_serializeOpGetDefaultCreditSpecification) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetEbsDefaultKmsKeyId struct { +type awsEc2query_serializeOpGetCapacityReservationUsage struct { } -func (*awsEc2query_serializeOpGetEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_serializeOpGetCapacityReservationUsage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetEbsDefaultKmsKeyId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetCapacityReservationUsage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33777,7 +34127,7 @@ func (m *awsEc2query_serializeOpGetEbsDefaultKmsKeyId) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetEbsDefaultKmsKeyIdInput) + input, ok := in.Parameters.(*GetCapacityReservationUsageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33802,10 +34152,10 @@ func (m *awsEc2query_serializeOpGetEbsDefaultKmsKeyId) HandleSerialize(ctx conte bodyWriter := 
bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetEbsDefaultKmsKeyId") + body.Key("Action").String("GetCapacityReservationUsage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetCapacityReservationUsageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33828,14 +34178,14 @@ func (m *awsEc2query_serializeOpGetEbsDefaultKmsKeyId) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetEbsEncryptionByDefault struct { +type awsEc2query_serializeOpGetCoipPoolUsage struct { } -func (*awsEc2query_serializeOpGetEbsEncryptionByDefault) ID() string { +func (*awsEc2query_serializeOpGetCoipPoolUsage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetCoipPoolUsage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33847,7 +34197,7 @@ func (m *awsEc2query_serializeOpGetEbsEncryptionByDefault) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetEbsEncryptionByDefaultInput) + input, ok := in.Parameters.(*GetCoipPoolUsageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33872,10 +34222,10 @@ func (m *awsEc2query_serializeOpGetEbsEncryptionByDefault) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetEbsEncryptionByDefault") + body.Key("Action").String("GetCoipPoolUsage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetCoipPoolUsageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33898,14 +34248,14 @@ func (m *awsEc2query_serializeOpGetEbsEncryptionByDefault) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetFlowLogsIntegrationTemplate struct { +type awsEc2query_serializeOpGetConsoleOutput struct { } -func (*awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) ID() string { +func (*awsEc2query_serializeOpGetConsoleOutput) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetConsoleOutput) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33917,7 +34267,7 @@ func (m *awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) HandleSerialize( return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetFlowLogsIntegrationTemplateInput) + input, ok := in.Parameters.(*GetConsoleOutputInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -33942,10 +34292,10 @@ func (m *awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetFlowLogsIntegrationTemplate") + body.Key("Action").String("GetConsoleOutput") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetFlowLogsIntegrationTemplateInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetConsoleOutputInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -33968,14 +34318,14 @@ func (m *awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetGroupsForCapacityReservation struct { +type awsEc2query_serializeOpGetConsoleScreenshot struct { } -func (*awsEc2query_serializeOpGetGroupsForCapacityReservation) ID() string { +func (*awsEc2query_serializeOpGetConsoleScreenshot) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetGroupsForCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetConsoleScreenshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -33987,7 +34337,7 @@ func (m *awsEc2query_serializeOpGetGroupsForCapacityReservation) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetGroupsForCapacityReservationInput) + input, ok := in.Parameters.(*GetConsoleScreenshotInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34012,10 +34362,10 @@ func (m *awsEc2query_serializeOpGetGroupsForCapacityReservation) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetGroupsForCapacityReservation") + body.Key("Action").String("GetConsoleScreenshot") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetGroupsForCapacityReservationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetConsoleScreenshotInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34038,14 +34388,14 @@ func (m *awsEc2query_serializeOpGetGroupsForCapacityReservation) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetHostReservationPurchasePreview struct { +type awsEc2query_serializeOpGetDeclarativePoliciesReportSummary struct { } -func (*awsEc2query_serializeOpGetHostReservationPurchasePreview) ID() string { +func (*awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpGetHostReservationPurchasePreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetDeclarativePoliciesReportSummary) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34057,7 +34407,7 @@ func (m *awsEc2query_serializeOpGetHostReservationPurchasePreview) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetHostReservationPurchasePreviewInput) + input, ok := in.Parameters.(*GetDeclarativePoliciesReportSummaryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34082,10 +34432,10 @@ func (m *awsEc2query_serializeOpGetHostReservationPurchasePreview) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetHostReservationPurchasePreview") + body.Key("Action").String("GetDeclarativePoliciesReportSummary") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetHostReservationPurchasePreviewInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetDeclarativePoliciesReportSummaryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34108,14 +34458,14 @@ func (m *awsEc2query_serializeOpGetHostReservationPurchasePreview) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetImageBlockPublicAccessState struct { +type awsEc2query_serializeOpGetDefaultCreditSpecification struct { } -func (*awsEc2query_serializeOpGetImageBlockPublicAccessState) ID() string { +func (*awsEc2query_serializeOpGetDefaultCreditSpecification) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetImageBlockPublicAccessState) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetDefaultCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34127,7 +34477,7 @@ func (m *awsEc2query_serializeOpGetImageBlockPublicAccessState) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetImageBlockPublicAccessStateInput) + input, ok := in.Parameters.(*GetDefaultCreditSpecificationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34152,10 +34502,10 @@ func (m *awsEc2query_serializeOpGetImageBlockPublicAccessState) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetImageBlockPublicAccessState") + body.Key("Action").String("GetDefaultCreditSpecification") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetImageBlockPublicAccessStateInput(input, 
bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetDefaultCreditSpecificationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34178,14 +34528,14 @@ func (m *awsEc2query_serializeOpGetImageBlockPublicAccessState) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetInstanceMetadataDefaults struct { +type awsEc2query_serializeOpGetEbsDefaultKmsKeyId struct { } -func (*awsEc2query_serializeOpGetInstanceMetadataDefaults) ID() string { +func (*awsEc2query_serializeOpGetEbsDefaultKmsKeyId) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetInstanceMetadataDefaults) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetEbsDefaultKmsKeyId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34197,7 +34547,7 @@ func (m *awsEc2query_serializeOpGetInstanceMetadataDefaults) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetInstanceMetadataDefaultsInput) + input, ok := in.Parameters.(*GetEbsDefaultKmsKeyIdInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34222,10 +34572,10 @@ func (m *awsEc2query_serializeOpGetInstanceMetadataDefaults) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetInstanceMetadataDefaults") + body.Key("Action").String("GetEbsDefaultKmsKeyId") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetInstanceMetadataDefaultsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34248,14 +34598,14 @@ func (m *awsEc2query_serializeOpGetInstanceMetadataDefaults) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetInstanceTpmEkPub struct { +type awsEc2query_serializeOpGetEbsEncryptionByDefault struct { } -func (*awsEc2query_serializeOpGetInstanceTpmEkPub) ID() string { +func (*awsEc2query_serializeOpGetEbsEncryptionByDefault) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetInstanceTpmEkPub) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetEbsEncryptionByDefault) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34267,7 +34617,7 @@ func (m *awsEc2query_serializeOpGetInstanceTpmEkPub) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetInstanceTpmEkPubInput) + input, ok := in.Parameters.(*GetEbsEncryptionByDefaultInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34292,10 +34642,10 @@ func (m *awsEc2query_serializeOpGetInstanceTpmEkPub) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetInstanceTpmEkPub") + body.Key("Action").String("GetEbsEncryptionByDefault") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetInstanceTpmEkPubInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetEbsEncryptionByDefaultInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34318,14 +34668,14 @@ func (m *awsEc2query_serializeOpGetInstanceTpmEkPub) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements struct { +type awsEc2query_serializeOpGetFlowLogsIntegrationTemplate struct { } -func (*awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) ID() string { +func (*awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetFlowLogsIntegrationTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34337,7 +34687,7 @@ func (m *awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) Handle return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetInstanceTypesFromInstanceRequirementsInput) + input, ok := in.Parameters.(*GetFlowLogsIntegrationTemplateInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34362,10 +34712,10 @@ func (m *awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) Handle bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetInstanceTypesFromInstanceRequirements") + body.Key("Action").String("GetFlowLogsIntegrationTemplate") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetInstanceTypesFromInstanceRequirementsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetFlowLogsIntegrationTemplateInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34388,14 +34738,14 @@ func (m *awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) Handle return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetInstanceUefiData struct { +type awsEc2query_serializeOpGetGroupsForCapacityReservation struct { } -func (*awsEc2query_serializeOpGetInstanceUefiData) ID() string { +func (*awsEc2query_serializeOpGetGroupsForCapacityReservation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetInstanceUefiData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetGroupsForCapacityReservation) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34407,7 +34757,7 @@ func (m *awsEc2query_serializeOpGetInstanceUefiData) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetInstanceUefiDataInput) + input, ok := in.Parameters.(*GetGroupsForCapacityReservationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34432,10 +34782,10 @@ func (m *awsEc2query_serializeOpGetInstanceUefiData) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetInstanceUefiData") + body.Key("Action").String("GetGroupsForCapacityReservation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetInstanceUefiDataInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetGroupsForCapacityReservationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34458,14 +34808,14 @@ func (m *awsEc2query_serializeOpGetInstanceUefiData) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamAddressHistory struct { +type awsEc2query_serializeOpGetHostReservationPurchasePreview struct { } -func (*awsEc2query_serializeOpGetIpamAddressHistory) ID() string { +func (*awsEc2query_serializeOpGetHostReservationPurchasePreview) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamAddressHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetHostReservationPurchasePreview) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34477,7 +34827,7 @@ func (m *awsEc2query_serializeOpGetIpamAddressHistory) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamAddressHistoryInput) + input, ok := in.Parameters.(*GetHostReservationPurchasePreviewInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34502,10 +34852,10 @@ func (m *awsEc2query_serializeOpGetIpamAddressHistory) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamAddressHistory") + body.Key("Action").String("GetHostReservationPurchasePreview") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamAddressHistoryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetHostReservationPurchasePreviewInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34528,14 +34878,14 @@ func (m *awsEc2query_serializeOpGetIpamAddressHistory) HandleSerialize(ctx conte 
return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamDiscoveredAccounts struct { +type awsEc2query_serializeOpGetImageBlockPublicAccessState struct { } -func (*awsEc2query_serializeOpGetIpamDiscoveredAccounts) ID() string { +func (*awsEc2query_serializeOpGetImageBlockPublicAccessState) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamDiscoveredAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetImageBlockPublicAccessState) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34547,7 +34897,7 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredAccounts) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamDiscoveredAccountsInput) + input, ok := in.Parameters.(*GetImageBlockPublicAccessStateInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34572,10 +34922,10 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredAccounts) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamDiscoveredAccounts") + body.Key("Action").String("GetImageBlockPublicAccessState") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredAccountsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetImageBlockPublicAccessStateInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34598,14 +34948,14 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredAccounts) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses struct { +type awsEc2query_serializeOpGetInstanceMetadataDefaults struct { } -func (*awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) ID() string { +func (*awsEc2query_serializeOpGetInstanceMetadataDefaults) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetInstanceMetadataDefaults) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34617,7 +34967,7 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamDiscoveredPublicAddressesInput) + input, ok := in.Parameters.(*GetInstanceMetadataDefaultsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34642,10 +34992,10 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamDiscoveredPublicAddresses") + body.Key("Action").String("GetInstanceMetadataDefaults") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredPublicAddressesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetInstanceMetadataDefaultsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34668,14 +35018,14 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs struct { +type awsEc2query_serializeOpGetInstanceTpmEkPub struct { } -func (*awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) ID() string { +func (*awsEc2query_serializeOpGetInstanceTpmEkPub) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetInstanceTpmEkPub) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34687,7 +35037,7 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamDiscoveredResourceCidrsInput) + input, ok := in.Parameters.(*GetInstanceTpmEkPubInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34712,10 +35062,10 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamDiscoveredResourceCidrs") + body.Key("Action").String("GetInstanceTpmEkPub") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredResourceCidrsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetInstanceTpmEkPubInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34738,14 +35088,14 @@ func (m *awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamPoolAllocations struct { +type awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements struct { } -func (*awsEc2query_serializeOpGetIpamPoolAllocations) ID() string { +func (*awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamPoolAllocations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetInstanceTypesFromInstanceRequirements) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34757,7 +35107,7 @@ func (m 
*awsEc2query_serializeOpGetIpamPoolAllocations) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamPoolAllocationsInput) + input, ok := in.Parameters.(*GetInstanceTypesFromInstanceRequirementsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34782,10 +35132,10 @@ func (m *awsEc2query_serializeOpGetIpamPoolAllocations) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamPoolAllocations") + body.Key("Action").String("GetInstanceTypesFromInstanceRequirements") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamPoolAllocationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetInstanceTypesFromInstanceRequirementsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34808,14 +35158,14 @@ func (m *awsEc2query_serializeOpGetIpamPoolAllocations) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamPoolCidrs struct { +type awsEc2query_serializeOpGetInstanceUefiData struct { } -func (*awsEc2query_serializeOpGetIpamPoolCidrs) ID() string { +func (*awsEc2query_serializeOpGetInstanceUefiData) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetIpamPoolCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetInstanceUefiData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34827,7 +35177,7 @@ func (m *awsEc2query_serializeOpGetIpamPoolCidrs) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamPoolCidrsInput) + input, ok := in.Parameters.(*GetInstanceUefiDataInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34852,10 +35202,10 @@ func (m *awsEc2query_serializeOpGetIpamPoolCidrs) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamPoolCidrs") + body.Key("Action").String("GetInstanceUefiData") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamPoolCidrsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetInstanceUefiDataInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34878,14 +35228,14 @@ func (m *awsEc2query_serializeOpGetIpamPoolCidrs) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetIpamResourceCidrs struct { +type awsEc2query_serializeOpGetIpamAddressHistory struct { } -func (*awsEc2query_serializeOpGetIpamResourceCidrs) ID() string { +func (*awsEc2query_serializeOpGetIpamAddressHistory) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpGetIpamResourceCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamAddressHistory) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34897,7 +35247,7 @@ func (m *awsEc2query_serializeOpGetIpamResourceCidrs) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetIpamResourceCidrsInput) + input, ok := in.Parameters.(*GetIpamAddressHistoryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34922,10 +35272,10 @@ func (m *awsEc2query_serializeOpGetIpamResourceCidrs) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetIpamResourceCidrs") + body.Key("Action").String("GetIpamAddressHistory") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetIpamResourceCidrsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamAddressHistoryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -34948,14 +35298,14 @@ func (m *awsEc2query_serializeOpGetIpamResourceCidrs) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetLaunchTemplateData struct { +type awsEc2query_serializeOpGetIpamDiscoveredAccounts struct { } -func (*awsEc2query_serializeOpGetLaunchTemplateData) ID() string { +func (*awsEc2query_serializeOpGetIpamDiscoveredAccounts) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetLaunchTemplateData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamDiscoveredAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -34967,7 +35317,7 @@ func (m *awsEc2query_serializeOpGetLaunchTemplateData) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetLaunchTemplateDataInput) + input, ok := in.Parameters.(*GetIpamDiscoveredAccountsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -34992,10 +35342,10 @@ func (m *awsEc2query_serializeOpGetLaunchTemplateData) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetLaunchTemplateData") + body.Key("Action").String("GetIpamDiscoveredAccounts") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetLaunchTemplateDataInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredAccountsInput(input, bodyEncoder.Value); err != nil { return out, metadata, 
&smithy.SerializationError{Err: err} } @@ -35018,14 +35368,14 @@ func (m *awsEc2query_serializeOpGetLaunchTemplateData) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetManagedPrefixListAssociations struct { +type awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses struct { } -func (*awsEc2query_serializeOpGetManagedPrefixListAssociations) ID() string { +func (*awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetManagedPrefixListAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamDiscoveredPublicAddresses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35037,7 +35387,7 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListAssociations) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetManagedPrefixListAssociationsInput) + input, ok := in.Parameters.(*GetIpamDiscoveredPublicAddressesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35062,10 +35412,10 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListAssociations) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetManagedPrefixListAssociations") + body.Key("Action").String("GetIpamDiscoveredPublicAddresses") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetManagedPrefixListAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredPublicAddressesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35088,14 +35438,14 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListAssociations) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetManagedPrefixListEntries struct { +type awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs struct { } -func (*awsEc2query_serializeOpGetManagedPrefixListEntries) ID() string { +func (*awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetManagedPrefixListEntries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamDiscoveredResourceCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35107,7 +35457,7 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListEntries) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetManagedPrefixListEntriesInput) + input, ok := in.Parameters.(*GetIpamDiscoveredResourceCidrsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} @@ -35132,10 +35482,10 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListEntries) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetManagedPrefixListEntries") + body.Key("Action").String("GetIpamDiscoveredResourceCidrs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetManagedPrefixListEntriesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamDiscoveredResourceCidrsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35158,14 +35508,14 @@ func (m *awsEc2query_serializeOpGetManagedPrefixListEntries) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings struct { +type awsEc2query_serializeOpGetIpamPoolAllocations struct { } -func (*awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) ID() string { +func (*awsEc2query_serializeOpGetIpamPoolAllocations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamPoolAllocations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35177,7 +35527,7 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) H return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetNetworkInsightsAccessScopeAnalysisFindingsInput) + input, ok := in.Parameters.(*GetIpamPoolAllocationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35202,10 +35552,10 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) H bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetNetworkInsightsAccessScopeAnalysisFindings") + body.Key("Action").String("GetIpamPoolAllocations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetNetworkInsightsAccessScopeAnalysisFindingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamPoolAllocationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35228,14 +35578,14 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) H return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent struct { +type awsEc2query_serializeOpGetIpamPoolCidrs struct { } -func (*awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) ID() string { +func (*awsEc2query_serializeOpGetIpamPoolCidrs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamPoolCidrs) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35247,7 +35597,7 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetNetworkInsightsAccessScopeContentInput) + input, ok := in.Parameters.(*GetIpamPoolCidrsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35272,10 +35622,10 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetNetworkInsightsAccessScopeContent") + body.Key("Action").String("GetIpamPoolCidrs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetNetworkInsightsAccessScopeContentInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamPoolCidrsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35298,14 +35648,14 @@ func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetPasswordData struct { +type awsEc2query_serializeOpGetIpamResourceCidrs struct { } -func (*awsEc2query_serializeOpGetPasswordData) ID() string { +func (*awsEc2query_serializeOpGetIpamResourceCidrs) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetPasswordData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetIpamResourceCidrs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35317,7 +35667,7 @@ func (m *awsEc2query_serializeOpGetPasswordData) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetPasswordDataInput) + input, ok := in.Parameters.(*GetIpamResourceCidrsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35342,10 +35692,10 @@ func (m *awsEc2query_serializeOpGetPasswordData) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetPasswordData") + body.Key("Action").String("GetIpamResourceCidrs") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetPasswordDataInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetIpamResourceCidrsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35368,14 +35718,14 @@ func (m *awsEc2query_serializeOpGetPasswordData) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetReservedInstancesExchangeQuote struct { 
+type awsEc2query_serializeOpGetLaunchTemplateData struct { } -func (*awsEc2query_serializeOpGetReservedInstancesExchangeQuote) ID() string { +func (*awsEc2query_serializeOpGetLaunchTemplateData) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetReservedInstancesExchangeQuote) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetLaunchTemplateData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35387,7 +35737,7 @@ func (m *awsEc2query_serializeOpGetReservedInstancesExchangeQuote) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetReservedInstancesExchangeQuoteInput) + input, ok := in.Parameters.(*GetLaunchTemplateDataInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35412,10 +35762,10 @@ func (m *awsEc2query_serializeOpGetReservedInstancesExchangeQuote) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetReservedInstancesExchangeQuote") + body.Key("Action").String("GetLaunchTemplateData") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetReservedInstancesExchangeQuoteInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetLaunchTemplateDataInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35438,14 +35788,14 @@ func (m *awsEc2query_serializeOpGetReservedInstancesExchangeQuote) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetRouteServerAssociations struct { +type awsEc2query_serializeOpGetManagedPrefixListAssociations struct { } -func (*awsEc2query_serializeOpGetRouteServerAssociations) ID() string { +func (*awsEc2query_serializeOpGetManagedPrefixListAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetRouteServerAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetManagedPrefixListAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35457,7 +35807,7 @@ func (m *awsEc2query_serializeOpGetRouteServerAssociations) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetRouteServerAssociationsInput) + input, ok := in.Parameters.(*GetManagedPrefixListAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35482,10 +35832,10 @@ func (m *awsEc2query_serializeOpGetRouteServerAssociations) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetRouteServerAssociations") + 
body.Key("Action").String("GetManagedPrefixListAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetRouteServerAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetManagedPrefixListAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35508,14 +35858,14 @@ func (m *awsEc2query_serializeOpGetRouteServerAssociations) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetRouteServerPropagations struct { +type awsEc2query_serializeOpGetManagedPrefixListEntries struct { } -func (*awsEc2query_serializeOpGetRouteServerPropagations) ID() string { +func (*awsEc2query_serializeOpGetManagedPrefixListEntries) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetRouteServerPropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetManagedPrefixListEntries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35527,7 +35877,7 @@ func (m *awsEc2query_serializeOpGetRouteServerPropagations) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetRouteServerPropagationsInput) + input, ok := in.Parameters.(*GetManagedPrefixListEntriesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35552,10 +35902,10 @@ func (m *awsEc2query_serializeOpGetRouteServerPropagations) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetRouteServerPropagations") + body.Key("Action").String("GetManagedPrefixListEntries") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetRouteServerPropagationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetManagedPrefixListEntriesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35578,14 +35928,14 @@ func (m *awsEc2query_serializeOpGetRouteServerPropagations) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetRouteServerRoutingDatabase struct { +type awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings struct { } -func (*awsEc2query_serializeOpGetRouteServerRoutingDatabase) ID() string { +func (*awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetRouteServerRoutingDatabase) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeAnalysisFindings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35597,7 +35947,7 @@ func (m *awsEc2query_serializeOpGetRouteServerRoutingDatabase) HandleSerialize(c return 
out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetRouteServerRoutingDatabaseInput) + input, ok := in.Parameters.(*GetNetworkInsightsAccessScopeAnalysisFindingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35622,10 +35972,10 @@ func (m *awsEc2query_serializeOpGetRouteServerRoutingDatabase) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetRouteServerRoutingDatabase") + body.Key("Action").String("GetNetworkInsightsAccessScopeAnalysisFindings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetRouteServerRoutingDatabaseInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetNetworkInsightsAccessScopeAnalysisFindingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35648,14 +35998,14 @@ func (m *awsEc2query_serializeOpGetRouteServerRoutingDatabase) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetSecurityGroupsForVpc struct { +type awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent struct { } -func (*awsEc2query_serializeOpGetSecurityGroupsForVpc) ID() string { +func (*awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetSecurityGroupsForVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetNetworkInsightsAccessScopeContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35667,7 +36017,7 @@ func (m *awsEc2query_serializeOpGetSecurityGroupsForVpc) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetSecurityGroupsForVpcInput) + input, ok := in.Parameters.(*GetNetworkInsightsAccessScopeContentInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35692,10 +36042,10 @@ func (m *awsEc2query_serializeOpGetSecurityGroupsForVpc) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetSecurityGroupsForVpc") + body.Key("Action").String("GetNetworkInsightsAccessScopeContent") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetSecurityGroupsForVpcInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetNetworkInsightsAccessScopeContentInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35718,14 +36068,14 @@ func (m *awsEc2query_serializeOpGetSecurityGroupsForVpc) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetSerialConsoleAccessStatus struct { +type awsEc2query_serializeOpGetPasswordData struct { } -func (*awsEc2query_serializeOpGetSerialConsoleAccessStatus) ID() string { +func 
(*awsEc2query_serializeOpGetPasswordData) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetSerialConsoleAccessStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetPasswordData) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35737,7 +36087,7 @@ func (m *awsEc2query_serializeOpGetSerialConsoleAccessStatus) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetSerialConsoleAccessStatusInput) + input, ok := in.Parameters.(*GetPasswordDataInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35762,10 +36112,10 @@ func (m *awsEc2query_serializeOpGetSerialConsoleAccessStatus) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetSerialConsoleAccessStatus") + body.Key("Action").String("GetPasswordData") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetSerialConsoleAccessStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetPasswordDataInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35788,14 +36138,14 @@ func (m *awsEc2query_serializeOpGetSerialConsoleAccessStatus) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetSnapshotBlockPublicAccessState struct { +type awsEc2query_serializeOpGetReservedInstancesExchangeQuote struct { } -func (*awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) ID() string { +func (*awsEc2query_serializeOpGetReservedInstancesExchangeQuote) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetReservedInstancesExchangeQuote) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35807,7 +36157,7 @@ func (m *awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetSnapshotBlockPublicAccessStateInput) + input, ok := in.Parameters.(*GetReservedInstancesExchangeQuoteInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35832,10 +36182,10 @@ func (m *awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetSnapshotBlockPublicAccessState") + body.Key("Action").String("GetReservedInstancesExchangeQuote") body.Key("Version").String("2016-11-15") - if err := 
awsEc2query_serializeOpDocumentGetSnapshotBlockPublicAccessStateInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetReservedInstancesExchangeQuoteInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35858,14 +36208,14 @@ func (m *awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetSpotPlacementScores struct { +type awsEc2query_serializeOpGetRouteServerAssociations struct { } -func (*awsEc2query_serializeOpGetSpotPlacementScores) ID() string { +func (*awsEc2query_serializeOpGetRouteServerAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetSpotPlacementScores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetRouteServerAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35877,7 +36227,7 @@ func (m *awsEc2query_serializeOpGetSpotPlacementScores) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetSpotPlacementScoresInput) + input, ok := in.Parameters.(*GetRouteServerAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35902,10 +36252,10 @@ func (m *awsEc2query_serializeOpGetSpotPlacementScores) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetSpotPlacementScores") + body.Key("Action").String("GetRouteServerAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetSpotPlacementScoresInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetRouteServerAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35928,14 +36278,14 @@ func (m *awsEc2query_serializeOpGetSpotPlacementScores) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetSubnetCidrReservations struct { +type awsEc2query_serializeOpGetRouteServerPropagations struct { } -func (*awsEc2query_serializeOpGetSubnetCidrReservations) ID() string { +func (*awsEc2query_serializeOpGetRouteServerPropagations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetSubnetCidrReservations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetRouteServerPropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -35947,7 +36297,7 @@ func (m *awsEc2query_serializeOpGetSubnetCidrReservations) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetSubnetCidrReservationsInput) + input, ok := 
in.Parameters.(*GetRouteServerPropagationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -35972,10 +36322,10 @@ func (m *awsEc2query_serializeOpGetSubnetCidrReservations) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetSubnetCidrReservations") + body.Key("Action").String("GetRouteServerPropagations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetSubnetCidrReservationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetRouteServerPropagationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -35998,14 +36348,14 @@ func (m *awsEc2query_serializeOpGetSubnetCidrReservations) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations struct { +type awsEc2query_serializeOpGetRouteServerRoutingDatabase struct { } -func (*awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) ID() string { +func (*awsEc2query_serializeOpGetRouteServerRoutingDatabase) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetRouteServerRoutingDatabase) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36017,7 +36367,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayAttachmentPropagationsInput) + input, ok := in.Parameters.(*GetRouteServerRoutingDatabaseInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36042,10 +36392,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayAttachmentPropagations") + body.Key("Action").String("GetRouteServerRoutingDatabase") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayAttachmentPropagationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetRouteServerRoutingDatabaseInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36068,14 +36418,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations struct { +type awsEc2query_serializeOpGetSecurityGroupsForVpc struct { } -func (*awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) ID() string { +func (*awsEc2query_serializeOpGetSecurityGroupsForVpc) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetSecurityGroupsForVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36087,7 +36437,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) Ha return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayMulticastDomainAssociationsInput) + input, ok := in.Parameters.(*GetSecurityGroupsForVpcInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36112,10 +36462,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) Ha bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayMulticastDomainAssociations") + body.Key("Action").String("GetSecurityGroupsForVpc") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayMulticastDomainAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetSecurityGroupsForVpcInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36138,14 +36488,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) Ha return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations struct { +type awsEc2query_serializeOpGetSerialConsoleAccessStatus struct { } -func (*awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) ID() string { +func (*awsEc2query_serializeOpGetSerialConsoleAccessStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetSerialConsoleAccessStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36157,7 +36507,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) Handle return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayPolicyTableAssociationsInput) + input, ok := in.Parameters.(*GetSerialConsoleAccessStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36182,10 +36532,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) Handle bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayPolicyTableAssociations") + body.Key("Action").String("GetSerialConsoleAccessStatus") body.Key("Version").String("2016-11-15") - if err := 
awsEc2query_serializeOpDocumentGetTransitGatewayPolicyTableAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetSerialConsoleAccessStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36208,14 +36558,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) Handle return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries struct { +type awsEc2query_serializeOpGetSnapshotBlockPublicAccessState struct { } -func (*awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) ID() string { +func (*awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetSnapshotBlockPublicAccessState) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36227,7 +36577,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayPolicyTableEntriesInput) + input, ok := in.Parameters.(*GetSnapshotBlockPublicAccessStateInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36252,10 +36602,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayPolicyTableEntries") + body.Key("Action").String("GetSnapshotBlockPublicAccessState") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayPolicyTableEntriesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetSnapshotBlockPublicAccessStateInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36278,14 +36628,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayPrefixListReferences struct { +type awsEc2query_serializeOpGetSpotPlacementScores struct { } -func (*awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) ID() string { +func (*awsEc2query_serializeOpGetSpotPlacementScores) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetSpotPlacementScores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36297,7 +36647,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) HandleSer return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayPrefixListReferencesInput) + input, ok := in.Parameters.(*GetSpotPlacementScoresInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36322,10 +36672,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayPrefixListReferences") + body.Key("Action").String("GetSpotPlacementScores") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayPrefixListReferencesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetSpotPlacementScoresInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36348,14 +36698,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations struct { +type awsEc2query_serializeOpGetSubnetCidrReservations struct { } -func (*awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) ID() string { +func (*awsEc2query_serializeOpGetSubnetCidrReservations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetSubnetCidrReservations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36367,7 +36717,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayRouteTableAssociationsInput) + input, ok := in.Parameters.(*GetSubnetCidrReservationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36392,10 +36742,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayRouteTableAssociations") + body.Key("Action").String("GetSubnetCidrReservations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayRouteTableAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetSubnetCidrReservationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36418,14 +36768,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations struct { +type awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations struct { } -func (*awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) ID() string { +func 
(*awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayAttachmentPropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36437,7 +36787,7 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetTransitGatewayRouteTablePropagationsInput) + input, ok := in.Parameters.(*GetTransitGatewayAttachmentPropagationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36462,10 +36812,10 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetTransitGatewayRouteTablePropagations") + body.Key("Action").String("GetTransitGatewayAttachmentPropagations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetTransitGatewayRouteTablePropagationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayAttachmentPropagationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36488,14 +36838,14 @@ func (m *awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy struct { +type awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations struct { } -func (*awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayMulticastDomainAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36507,7 +36857,7 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVerifiedAccessEndpointPolicyInput) + input, ok := in.Parameters.(*GetTransitGatewayMulticastDomainAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36532,10 +36882,10 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("GetVerifiedAccessEndpointPolicy") + body.Key("Action").String("GetTransitGatewayMulticastDomainAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVerifiedAccessEndpointPolicyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayMulticastDomainAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36558,14 +36908,14 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVerifiedAccessEndpointTargets struct { +type awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations struct { } -func (*awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36577,7 +36927,7 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVerifiedAccessEndpointTargetsInput) + input, ok := in.Parameters.(*GetTransitGatewayPolicyTableAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36602,10 +36952,10 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetVerifiedAccessEndpointTargets") + body.Key("Action").String("GetTransitGatewayPolicyTableAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVerifiedAccessEndpointTargetsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayPolicyTableAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36628,14 +36978,14 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVerifiedAccessGroupPolicy struct { +type awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries struct { } -func (*awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayPolicyTableEntries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err 
error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36647,7 +36997,7 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVerifiedAccessGroupPolicyInput) + input, ok := in.Parameters.(*GetTransitGatewayPolicyTableEntriesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36672,10 +37022,10 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetVerifiedAccessGroupPolicy") + body.Key("Action").String("GetTransitGatewayPolicyTableEntries") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVerifiedAccessGroupPolicyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayPolicyTableEntriesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36698,14 +37048,14 @@ func (m *awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration struct { +type awsEc2query_serializeOpGetTransitGatewayPrefixListReferences struct { } -func (*awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayPrefixListReferences) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36717,7 +37067,7 @@ func (m *awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) Handl return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVpnConnectionDeviceSampleConfigurationInput) + input, ok := in.Parameters.(*GetTransitGatewayPrefixListReferencesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36742,10 +37092,10 @@ func (m *awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) Handl bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetVpnConnectionDeviceSampleConfiguration") + body.Key("Action").String("GetTransitGatewayPrefixListReferences") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVpnConnectionDeviceSampleConfigurationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayPrefixListReferencesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36768,14 +37118,14 @@ func (m 
*awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) Handl return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVpnConnectionDeviceTypes struct { +type awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations struct { } -func (*awsEc2query_serializeOpGetVpnConnectionDeviceTypes) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVpnConnectionDeviceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayRouteTableAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36787,7 +37137,7 @@ func (m *awsEc2query_serializeOpGetVpnConnectionDeviceTypes) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVpnConnectionDeviceTypesInput) + input, ok := in.Parameters.(*GetTransitGatewayRouteTableAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36812,10 +37162,10 @@ func (m *awsEc2query_serializeOpGetVpnConnectionDeviceTypes) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetVpnConnectionDeviceTypes") + body.Key("Action").String("GetTransitGatewayRouteTableAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVpnConnectionDeviceTypesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayRouteTableAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36838,14 +37188,14 @@ func (m *awsEc2query_serializeOpGetVpnConnectionDeviceTypes) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpGetVpnTunnelReplacementStatus struct { +type awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations struct { } -func (*awsEc2query_serializeOpGetVpnTunnelReplacementStatus) ID() string { +func (*awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpGetVpnTunnelReplacementStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetTransitGatewayRouteTablePropagations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36857,7 +37207,7 @@ func (m *awsEc2query_serializeOpGetVpnTunnelReplacementStatus) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*GetVpnTunnelReplacementStatusInput) + input, ok := in.Parameters.(*GetTransitGatewayRouteTablePropagationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", 
in.Parameters)} @@ -36882,10 +37232,10 @@ func (m *awsEc2query_serializeOpGetVpnTunnelReplacementStatus) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("GetVpnTunnelReplacementStatus") + body.Key("Action").String("GetTransitGatewayRouteTablePropagations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentGetVpnTunnelReplacementStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetTransitGatewayRouteTablePropagationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36908,14 +37258,14 @@ func (m *awsEc2query_serializeOpGetVpnTunnelReplacementStatus) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList struct { +type awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy struct { } -func (*awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) ID() string { +func (*awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetVerifiedAccessEndpointPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36927,7 +37277,7 @@ func (m *awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportClientVpnClientCertificateRevocationListInput) + input, ok := in.Parameters.(*GetVerifiedAccessEndpointPolicyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -36952,10 +37302,10 @@ func (m *awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportClientVpnClientCertificateRevocationList") + body.Key("Action").String("GetVerifiedAccessEndpointPolicy") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportClientVpnClientCertificateRevocationListInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVerifiedAccessEndpointPolicyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -36978,14 +37328,14 @@ func (m *awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpImportImage struct { +type awsEc2query_serializeOpGetVerifiedAccessEndpointTargets struct { } -func (*awsEc2query_serializeOpImportImage) ID() string { +func (*awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpGetVerifiedAccessEndpointTargets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -36997,7 +37347,7 @@ func (m *awsEc2query_serializeOpImportImage) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportImageInput) + input, ok := in.Parameters.(*GetVerifiedAccessEndpointTargetsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37022,10 +37372,10 @@ func (m *awsEc2query_serializeOpImportImage) HandleSerialize(ctx context.Context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportImage") + body.Key("Action").String("GetVerifiedAccessEndpointTargets") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportImageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVerifiedAccessEndpointTargetsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37048,14 +37398,14 @@ func (m *awsEc2query_serializeOpImportImage) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpImportInstance struct { +type awsEc2query_serializeOpGetVerifiedAccessGroupPolicy struct { } -func (*awsEc2query_serializeOpImportInstance) ID() string { +func (*awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetVerifiedAccessGroupPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37067,7 +37417,7 @@ func (m *awsEc2query_serializeOpImportInstance) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportInstanceInput) + input, ok := in.Parameters.(*GetVerifiedAccessGroupPolicyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37092,10 +37442,10 @@ func (m *awsEc2query_serializeOpImportInstance) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportInstance") + body.Key("Action").String("GetVerifiedAccessGroupPolicy") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportInstanceInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVerifiedAccessGroupPolicyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37118,14 +37468,14 @@ func (m *awsEc2query_serializeOpImportInstance) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } 
-type awsEc2query_serializeOpImportKeyPair struct { +type awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration struct { } -func (*awsEc2query_serializeOpImportKeyPair) ID() string { +func (*awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetVpnConnectionDeviceSampleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37137,7 +37487,7 @@ func (m *awsEc2query_serializeOpImportKeyPair) HandleSerialize(ctx context.Conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportKeyPairInput) + input, ok := in.Parameters.(*GetVpnConnectionDeviceSampleConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37162,10 +37512,10 @@ func (m *awsEc2query_serializeOpImportKeyPair) HandleSerialize(ctx context.Conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportKeyPair") + body.Key("Action").String("GetVpnConnectionDeviceSampleConfiguration") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportKeyPairInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVpnConnectionDeviceSampleConfigurationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37188,14 +37538,14 @@ func (m *awsEc2query_serializeOpImportKeyPair) HandleSerialize(ctx context.Conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpImportSnapshot struct { +type awsEc2query_serializeOpGetVpnConnectionDeviceTypes struct { } -func (*awsEc2query_serializeOpImportSnapshot) ID() string { +func (*awsEc2query_serializeOpGetVpnConnectionDeviceTypes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportSnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetVpnConnectionDeviceTypes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37207,7 +37557,7 @@ func (m *awsEc2query_serializeOpImportSnapshot) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportSnapshotInput) + input, ok := in.Parameters.(*GetVpnConnectionDeviceTypesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37232,10 +37582,10 @@ func (m *awsEc2query_serializeOpImportSnapshot) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportSnapshot") + 
body.Key("Action").String("GetVpnConnectionDeviceTypes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportSnapshotInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVpnConnectionDeviceTypesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37258,14 +37608,14 @@ func (m *awsEc2query_serializeOpImportSnapshot) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpImportVolume struct { +type awsEc2query_serializeOpGetVpnTunnelReplacementStatus struct { } -func (*awsEc2query_serializeOpImportVolume) ID() string { +func (*awsEc2query_serializeOpGetVpnTunnelReplacementStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpImportVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpGetVpnTunnelReplacementStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37277,7 +37627,7 @@ func (m *awsEc2query_serializeOpImportVolume) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ImportVolumeInput) + input, ok := in.Parameters.(*GetVpnTunnelReplacementStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37302,10 +37652,10 @@ func (m *awsEc2query_serializeOpImportVolume) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ImportVolume") + body.Key("Action").String("GetVpnTunnelReplacementStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentImportVolumeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentGetVpnTunnelReplacementStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37328,14 +37678,14 @@ func (m *awsEc2query_serializeOpImportVolume) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpListImagesInRecycleBin struct { +type awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList struct { } -func (*awsEc2query_serializeOpListImagesInRecycleBin) ID() string { +func (*awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpListImagesInRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportClientVpnClientCertificateRevocationList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37347,7 +37697,7 @@ func (m *awsEc2query_serializeOpListImagesInRecycleBin) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - 
input, ok := in.Parameters.(*ListImagesInRecycleBinInput) + input, ok := in.Parameters.(*ImportClientVpnClientCertificateRevocationListInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37372,10 +37722,10 @@ func (m *awsEc2query_serializeOpListImagesInRecycleBin) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ListImagesInRecycleBin") + body.Key("Action").String("ImportClientVpnClientCertificateRevocationList") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentListImagesInRecycleBinInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentImportClientVpnClientCertificateRevocationListInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37398,14 +37748,14 @@ func (m *awsEc2query_serializeOpListImagesInRecycleBin) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpListSnapshotsInRecycleBin struct { +type awsEc2query_serializeOpImportImage struct { } -func (*awsEc2query_serializeOpListSnapshotsInRecycleBin) ID() string { +func (*awsEc2query_serializeOpImportImage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpListSnapshotsInRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37417,7 +37767,7 @@ func (m *awsEc2query_serializeOpListSnapshotsInRecycleBin) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ListSnapshotsInRecycleBinInput) + input, ok := in.Parameters.(*ImportImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37442,10 +37792,10 @@ func (m *awsEc2query_serializeOpListSnapshotsInRecycleBin) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ListSnapshotsInRecycleBin") + body.Key("Action").String("ImportImage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentListSnapshotsInRecycleBinInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentImportImageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37468,14 +37818,14 @@ func (m *awsEc2query_serializeOpListSnapshotsInRecycleBin) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpLockSnapshot struct { +type awsEc2query_serializeOpImportInstance struct { } -func (*awsEc2query_serializeOpLockSnapshot) ID() string { +func (*awsEc2query_serializeOpImportInstance) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpLockSnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportInstance) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37487,7 +37837,7 @@ func (m *awsEc2query_serializeOpLockSnapshot) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*LockSnapshotInput) + input, ok := in.Parameters.(*ImportInstanceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37512,10 +37862,10 @@ func (m *awsEc2query_serializeOpLockSnapshot) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("LockSnapshot") + body.Key("Action").String("ImportInstance") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentLockSnapshotInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentImportInstanceInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37538,14 +37888,14 @@ func (m *awsEc2query_serializeOpLockSnapshot) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyAddressAttribute struct { +type awsEc2query_serializeOpImportKeyPair struct { } -func (*awsEc2query_serializeOpModifyAddressAttribute) ID() string { +func (*awsEc2query_serializeOpImportKeyPair) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyAddressAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37557,7 +37907,7 @@ func (m *awsEc2query_serializeOpModifyAddressAttribute) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyAddressAttributeInput) + input, ok := in.Parameters.(*ImportKeyPairInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37582,10 +37932,10 @@ func (m *awsEc2query_serializeOpModifyAddressAttribute) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyAddressAttribute") + body.Key("Action").String("ImportKeyPair") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyAddressAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentImportKeyPairInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37608,14 +37958,14 @@ func (m *awsEc2query_serializeOpModifyAddressAttribute) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyAvailabilityZoneGroup struct { +type awsEc2query_serializeOpImportSnapshot struct { } -func 
(*awsEc2query_serializeOpModifyAvailabilityZoneGroup) ID() string { +func (*awsEc2query_serializeOpImportSnapshot) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyAvailabilityZoneGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportSnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37627,7 +37977,7 @@ func (m *awsEc2query_serializeOpModifyAvailabilityZoneGroup) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyAvailabilityZoneGroupInput) + input, ok := in.Parameters.(*ImportSnapshotInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37652,10 +38002,10 @@ func (m *awsEc2query_serializeOpModifyAvailabilityZoneGroup) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyAvailabilityZoneGroup") + body.Key("Action").String("ImportSnapshot") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyAvailabilityZoneGroupInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentImportSnapshotInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37678,14 +38028,14 @@ func (m *awsEc2query_serializeOpModifyAvailabilityZoneGroup) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyCapacityReservation struct { +type awsEc2query_serializeOpImportVolume struct { } -func (*awsEc2query_serializeOpModifyCapacityReservation) ID() string { +func (*awsEc2query_serializeOpImportVolume) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpImportVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37697,7 +38047,7 @@ func (m *awsEc2query_serializeOpModifyCapacityReservation) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyCapacityReservationInput) + input, ok := in.Parameters.(*ImportVolumeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37722,10 +38072,10 @@ func (m *awsEc2query_serializeOpModifyCapacityReservation) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyCapacityReservation") + body.Key("Action").String("ImportVolume") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyCapacityReservationInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentImportVolumeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37748,14 +38098,14 @@ func (m *awsEc2query_serializeOpModifyCapacityReservation) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyCapacityReservationFleet struct { +type awsEc2query_serializeOpListImagesInRecycleBin struct { } -func (*awsEc2query_serializeOpModifyCapacityReservationFleet) ID() string { +func (*awsEc2query_serializeOpListImagesInRecycleBin) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyCapacityReservationFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpListImagesInRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37767,7 +38117,7 @@ func (m *awsEc2query_serializeOpModifyCapacityReservationFleet) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyCapacityReservationFleetInput) + input, ok := in.Parameters.(*ListImagesInRecycleBinInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37792,10 +38142,10 @@ func (m *awsEc2query_serializeOpModifyCapacityReservationFleet) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyCapacityReservationFleet") + body.Key("Action").String("ListImagesInRecycleBin") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyCapacityReservationFleetInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentListImagesInRecycleBinInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37818,14 +38168,14 @@ func (m *awsEc2query_serializeOpModifyCapacityReservationFleet) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyClientVpnEndpoint struct { +type awsEc2query_serializeOpListSnapshotsInRecycleBin struct { } -func (*awsEc2query_serializeOpModifyClientVpnEndpoint) ID() string { +func (*awsEc2query_serializeOpListSnapshotsInRecycleBin) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyClientVpnEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpListSnapshotsInRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37837,7 +38187,7 @@ func (m *awsEc2query_serializeOpModifyClientVpnEndpoint) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyClientVpnEndpointInput) + input, ok := in.Parameters.(*ListSnapshotsInRecycleBinInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} @@ -37862,10 +38212,10 @@ func (m *awsEc2query_serializeOpModifyClientVpnEndpoint) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyClientVpnEndpoint") + body.Key("Action").String("ListSnapshotsInRecycleBin") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyClientVpnEndpointInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentListSnapshotsInRecycleBinInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37888,14 +38238,14 @@ func (m *awsEc2query_serializeOpModifyClientVpnEndpoint) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyDefaultCreditSpecification struct { +type awsEc2query_serializeOpLockSnapshot struct { } -func (*awsEc2query_serializeOpModifyDefaultCreditSpecification) ID() string { +func (*awsEc2query_serializeOpLockSnapshot) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyDefaultCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpLockSnapshot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -37907,7 +38257,7 @@ func (m *awsEc2query_serializeOpModifyDefaultCreditSpecification) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyDefaultCreditSpecificationInput) + input, ok := in.Parameters.(*LockSnapshotInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -37932,10 +38282,10 @@ func (m *awsEc2query_serializeOpModifyDefaultCreditSpecification) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyDefaultCreditSpecification") + body.Key("Action").String("LockSnapshot") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyDefaultCreditSpecificationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentLockSnapshotInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -37958,14 +38308,14 @@ func (m *awsEc2query_serializeOpModifyDefaultCreditSpecification) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyEbsDefaultKmsKeyId struct { +type awsEc2query_serializeOpModifyAddressAttribute struct { } -func (*awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_serializeOpModifyAddressAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyAddressAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := 
tracing.StartSpan(ctx, "OperationSerializer") @@ -37977,7 +38327,7 @@ func (m *awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyEbsDefaultKmsKeyIdInput) + input, ok := in.Parameters.(*ModifyAddressAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38002,10 +38352,10 @@ func (m *awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyEbsDefaultKmsKeyId") + body.Key("Action").String("ModifyAddressAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyAddressAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38028,14 +38378,14 @@ func (m *awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyFleet struct { +type awsEc2query_serializeOpModifyAvailabilityZoneGroup struct { } -func (*awsEc2query_serializeOpModifyFleet) ID() string { +func (*awsEc2query_serializeOpModifyAvailabilityZoneGroup) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyAvailabilityZoneGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38047,7 +38397,7 @@ func (m *awsEc2query_serializeOpModifyFleet) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyFleetInput) + input, ok := in.Parameters.(*ModifyAvailabilityZoneGroupInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38072,10 +38422,10 @@ func (m *awsEc2query_serializeOpModifyFleet) HandleSerialize(ctx context.Context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyFleet") + body.Key("Action").String("ModifyAvailabilityZoneGroup") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyFleetInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyAvailabilityZoneGroupInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38098,14 +38448,14 @@ func (m *awsEc2query_serializeOpModifyFleet) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyFpgaImageAttribute struct { +type awsEc2query_serializeOpModifyCapacityReservation struct { } -func (*awsEc2query_serializeOpModifyFpgaImageAttribute) ID() string { +func (*awsEc2query_serializeOpModifyCapacityReservation) ID() 
string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyFpgaImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38117,7 +38467,7 @@ func (m *awsEc2query_serializeOpModifyFpgaImageAttribute) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyFpgaImageAttributeInput) + input, ok := in.Parameters.(*ModifyCapacityReservationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38142,10 +38492,10 @@ func (m *awsEc2query_serializeOpModifyFpgaImageAttribute) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyFpgaImageAttribute") + body.Key("Action").String("ModifyCapacityReservation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyFpgaImageAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyCapacityReservationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38168,14 +38518,14 @@ func (m *awsEc2query_serializeOpModifyFpgaImageAttribute) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyHosts struct { +type awsEc2query_serializeOpModifyCapacityReservationFleet struct { } -func (*awsEc2query_serializeOpModifyHosts) ID() string { +func (*awsEc2query_serializeOpModifyCapacityReservationFleet) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyCapacityReservationFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38187,7 +38537,7 @@ func (m *awsEc2query_serializeOpModifyHosts) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyHostsInput) + input, ok := in.Parameters.(*ModifyCapacityReservationFleetInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38212,10 +38562,10 @@ func (m *awsEc2query_serializeOpModifyHosts) HandleSerialize(ctx context.Context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyHosts") + body.Key("Action").String("ModifyCapacityReservationFleet") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyHostsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyCapacityReservationFleetInput(input, bodyEncoder.Value); err 
!= nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38238,14 +38588,14 @@ func (m *awsEc2query_serializeOpModifyHosts) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIdentityIdFormat struct { +type awsEc2query_serializeOpModifyClientVpnEndpoint struct { } -func (*awsEc2query_serializeOpModifyIdentityIdFormat) ID() string { +func (*awsEc2query_serializeOpModifyClientVpnEndpoint) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIdentityIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyClientVpnEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38257,7 +38607,7 @@ func (m *awsEc2query_serializeOpModifyIdentityIdFormat) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIdentityIdFormatInput) + input, ok := in.Parameters.(*ModifyClientVpnEndpointInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38282,10 +38632,10 @@ func (m *awsEc2query_serializeOpModifyIdentityIdFormat) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIdentityIdFormat") + body.Key("Action").String("ModifyClientVpnEndpoint") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIdentityIdFormatInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyClientVpnEndpointInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38308,14 +38658,14 @@ func (m *awsEc2query_serializeOpModifyIdentityIdFormat) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIdFormat struct { +type awsEc2query_serializeOpModifyDefaultCreditSpecification struct { } -func (*awsEc2query_serializeOpModifyIdFormat) ID() string { +func (*awsEc2query_serializeOpModifyDefaultCreditSpecification) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyDefaultCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38327,7 +38677,7 @@ func (m *awsEc2query_serializeOpModifyIdFormat) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIdFormatInput) + input, ok := in.Parameters.(*ModifyDefaultCreditSpecificationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38352,10 +38702,10 @@ func (m *awsEc2query_serializeOpModifyIdFormat) HandleSerialize(ctx 
context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIdFormat") + body.Key("Action").String("ModifyDefaultCreditSpecification") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIdFormatInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyDefaultCreditSpecificationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38378,14 +38728,14 @@ func (m *awsEc2query_serializeOpModifyIdFormat) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyImageAttribute struct { +type awsEc2query_serializeOpModifyEbsDefaultKmsKeyId struct { } -func (*awsEc2query_serializeOpModifyImageAttribute) ID() string { +func (*awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyEbsDefaultKmsKeyId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38397,7 +38747,7 @@ func (m *awsEc2query_serializeOpModifyImageAttribute) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyImageAttributeInput) + input, ok := in.Parameters.(*ModifyEbsDefaultKmsKeyIdInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38422,10 +38772,10 @@ func (m *awsEc2query_serializeOpModifyImageAttribute) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyImageAttribute") + body.Key("Action").String("ModifyEbsDefaultKmsKeyId") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyImageAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38448,14 +38798,14 @@ func (m *awsEc2query_serializeOpModifyImageAttribute) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceAttribute struct { +type awsEc2query_serializeOpModifyFleet struct { } -func (*awsEc2query_serializeOpModifyInstanceAttribute) ID() string { +func (*awsEc2query_serializeOpModifyFleet) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38467,7 +38817,7 @@ func (m *awsEc2query_serializeOpModifyInstanceAttribute) HandleSerialize(ctx con return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceAttributeInput) + input, ok := in.Parameters.(*ModifyFleetInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38492,10 +38842,10 @@ func (m *awsEc2query_serializeOpModifyInstanceAttribute) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceAttribute") + body.Key("Action").String("ModifyFleet") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyFleetInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38518,14 +38868,14 @@ func (m *awsEc2query_serializeOpModifyInstanceAttribute) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes struct { +type awsEc2query_serializeOpModifyFpgaImageAttribute struct { } -func (*awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) ID() string { +func (*awsEc2query_serializeOpModifyFpgaImageAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyFpgaImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38537,7 +38887,7 @@ func (m *awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceCapacityReservationAttributesInput) + input, ok := in.Parameters.(*ModifyFpgaImageAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38562,10 +38912,10 @@ func (m *awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceCapacityReservationAttributes") + body.Key("Action").String("ModifyFpgaImageAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceCapacityReservationAttributesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyFpgaImageAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38588,14 +38938,14 @@ func (m *awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceCpuOptions struct { +type awsEc2query_serializeOpModifyHosts struct { } -func (*awsEc2query_serializeOpModifyInstanceCpuOptions) ID() string { +func (*awsEc2query_serializeOpModifyHosts) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpModifyInstanceCpuOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38607,7 +38957,7 @@ func (m *awsEc2query_serializeOpModifyInstanceCpuOptions) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceCpuOptionsInput) + input, ok := in.Parameters.(*ModifyHostsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38632,10 +38982,10 @@ func (m *awsEc2query_serializeOpModifyInstanceCpuOptions) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceCpuOptions") + body.Key("Action").String("ModifyHosts") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceCpuOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyHostsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38658,14 +39008,14 @@ func (m *awsEc2query_serializeOpModifyInstanceCpuOptions) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceCreditSpecification struct { +type awsEc2query_serializeOpModifyIdentityIdFormat struct { } -func (*awsEc2query_serializeOpModifyInstanceCreditSpecification) ID() string { +func (*awsEc2query_serializeOpModifyIdentityIdFormat) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIdentityIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38677,7 +39027,7 @@ func (m *awsEc2query_serializeOpModifyInstanceCreditSpecification) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceCreditSpecificationInput) + input, ok := in.Parameters.(*ModifyIdentityIdFormatInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38702,10 +39052,10 @@ func (m *awsEc2query_serializeOpModifyInstanceCreditSpecification) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceCreditSpecification") + body.Key("Action").String("ModifyIdentityIdFormat") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceCreditSpecificationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIdentityIdFormatInput(input, bodyEncoder.Value); err != nil { return out, 
metadata, &smithy.SerializationError{Err: err} } @@ -38728,14 +39078,14 @@ func (m *awsEc2query_serializeOpModifyInstanceCreditSpecification) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceEventStartTime struct { +type awsEc2query_serializeOpModifyIdFormat struct { } -func (*awsEc2query_serializeOpModifyInstanceEventStartTime) ID() string { +func (*awsEc2query_serializeOpModifyIdFormat) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceEventStartTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIdFormat) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38747,7 +39097,7 @@ func (m *awsEc2query_serializeOpModifyInstanceEventStartTime) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceEventStartTimeInput) + input, ok := in.Parameters.(*ModifyIdFormatInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38772,10 +39122,10 @@ func (m *awsEc2query_serializeOpModifyInstanceEventStartTime) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceEventStartTime") + body.Key("Action").String("ModifyIdFormat") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceEventStartTimeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIdFormatInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38798,14 +39148,14 @@ func (m *awsEc2query_serializeOpModifyInstanceEventStartTime) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceEventWindow struct { +type awsEc2query_serializeOpModifyImageAttribute struct { } -func (*awsEc2query_serializeOpModifyInstanceEventWindow) ID() string { +func (*awsEc2query_serializeOpModifyImageAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38817,7 +39167,7 @@ func (m *awsEc2query_serializeOpModifyInstanceEventWindow) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceEventWindowInput) + input, ok := in.Parameters.(*ModifyImageAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38842,10 +39192,10 @@ func (m *awsEc2query_serializeOpModifyInstanceEventWindow) HandleSerialize(ctx c bodyWriter := 
bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceEventWindow") + body.Key("Action").String("ModifyImageAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyImageAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38868,14 +39218,14 @@ func (m *awsEc2query_serializeOpModifyInstanceEventWindow) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceMaintenanceOptions struct { +type awsEc2query_serializeOpModifyInstanceAttribute struct { } -func (*awsEc2query_serializeOpModifyInstanceMaintenanceOptions) ID() string { +func (*awsEc2query_serializeOpModifyInstanceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceMaintenanceOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -38887,7 +39237,7 @@ func (m *awsEc2query_serializeOpModifyInstanceMaintenanceOptions) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceMaintenanceOptionsInput) + input, ok := in.Parameters.(*ModifyInstanceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38912,10 +39262,10 @@ func (m *awsEc2query_serializeOpModifyInstanceMaintenanceOptions) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceMaintenanceOptions") + body.Key("Action").String("ModifyInstanceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceMaintenanceOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -38938,14 +39288,14 @@ func (m *awsEc2query_serializeOpModifyInstanceMaintenanceOptions) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceMetadataDefaults struct { +type awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes struct { } -func (*awsEc2query_serializeOpModifyInstanceMetadataDefaults) ID() string { +func (*awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceMetadataDefaults) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceCapacityReservationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := 
tracing.StartSpan(ctx, "OperationSerializer") @@ -38957,7 +39307,7 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataDefaults) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceMetadataDefaultsInput) + input, ok := in.Parameters.(*ModifyInstanceCapacityReservationAttributesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -38982,10 +39332,10 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataDefaults) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceMetadataDefaults") + body.Key("Action").String("ModifyInstanceCapacityReservationAttributes") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceMetadataDefaultsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceCapacityReservationAttributesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39008,14 +39358,14 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataDefaults) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstanceMetadataOptions struct { +type awsEc2query_serializeOpModifyInstanceConnectEndpoint struct { } -func (*awsEc2query_serializeOpModifyInstanceMetadataOptions) ID() string { +func (*awsEc2query_serializeOpModifyInstanceConnectEndpoint) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceMetadataOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceConnectEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39027,7 +39377,7 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataOptions) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceMetadataOptionsInput) + input, ok := in.Parameters.(*ModifyInstanceConnectEndpointInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39052,10 +39402,10 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataOptions) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceMetadataOptions") + body.Key("Action").String("ModifyInstanceConnectEndpoint") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceMetadataOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceConnectEndpointInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39078,14 +39428,14 @@ func (m *awsEc2query_serializeOpModifyInstanceMetadataOptions) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type 
awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions struct { +type awsEc2query_serializeOpModifyInstanceCpuOptions struct { } -func (*awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) ID() string { +func (*awsEc2query_serializeOpModifyInstanceCpuOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceCpuOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39097,7 +39447,7 @@ func (m *awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstanceNetworkPerformanceOptionsInput) + input, ok := in.Parameters.(*ModifyInstanceCpuOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39122,10 +39472,10 @@ func (m *awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstanceNetworkPerformanceOptions") + body.Key("Action").String("ModifyInstanceCpuOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstanceNetworkPerformanceOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceCpuOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39148,14 +39498,14 @@ func (m *awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyInstancePlacement struct { +type awsEc2query_serializeOpModifyInstanceCreditSpecification struct { } -func (*awsEc2query_serializeOpModifyInstancePlacement) ID() string { +func (*awsEc2query_serializeOpModifyInstanceCreditSpecification) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyInstancePlacement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceCreditSpecification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39167,7 +39517,7 @@ func (m *awsEc2query_serializeOpModifyInstancePlacement) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyInstancePlacementInput) + input, ok := in.Parameters.(*ModifyInstanceCreditSpecificationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39192,10 +39542,10 @@ func (m *awsEc2query_serializeOpModifyInstancePlacement) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyInstancePlacement") + body.Key("Action").String("ModifyInstanceCreditSpecification") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyInstancePlacementInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceCreditSpecificationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39218,14 +39568,14 @@ func (m *awsEc2query_serializeOpModifyInstancePlacement) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIpam struct { +type awsEc2query_serializeOpModifyInstanceEventStartTime struct { } -func (*awsEc2query_serializeOpModifyIpam) ID() string { +func (*awsEc2query_serializeOpModifyInstanceEventStartTime) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIpam) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceEventStartTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39237,7 +39587,7 @@ func (m *awsEc2query_serializeOpModifyIpam) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIpamInput) + input, ok := in.Parameters.(*ModifyInstanceEventStartTimeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39262,10 +39612,10 @@ func (m *awsEc2query_serializeOpModifyIpam) HandleSerialize(ctx context.Context, bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIpam") + body.Key("Action").String("ModifyInstanceEventStartTime") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIpamInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceEventStartTimeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39288,14 +39638,14 @@ func (m *awsEc2query_serializeOpModifyIpam) HandleSerialize(ctx context.Context, return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIpamPool struct { +type awsEc2query_serializeOpModifyInstanceEventWindow struct { } -func (*awsEc2query_serializeOpModifyIpamPool) ID() string { +func (*awsEc2query_serializeOpModifyInstanceEventWindow) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIpamPool) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceEventWindow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39307,7 +39657,7 @@ func (m *awsEc2query_serializeOpModifyIpamPool) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport 
type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIpamPoolInput) + input, ok := in.Parameters.(*ModifyInstanceEventWindowInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39332,10 +39682,10 @@ func (m *awsEc2query_serializeOpModifyIpamPool) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIpamPool") + body.Key("Action").String("ModifyInstanceEventWindow") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIpamPoolInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceEventWindowInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39358,14 +39708,14 @@ func (m *awsEc2query_serializeOpModifyIpamPool) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIpamResourceCidr struct { +type awsEc2query_serializeOpModifyInstanceMaintenanceOptions struct { } -func (*awsEc2query_serializeOpModifyIpamResourceCidr) ID() string { +func (*awsEc2query_serializeOpModifyInstanceMaintenanceOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIpamResourceCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceMaintenanceOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39377,7 +39727,7 @@ func (m *awsEc2query_serializeOpModifyIpamResourceCidr) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIpamResourceCidrInput) + input, ok := in.Parameters.(*ModifyInstanceMaintenanceOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39402,10 +39752,10 @@ func (m *awsEc2query_serializeOpModifyIpamResourceCidr) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIpamResourceCidr") + body.Key("Action").String("ModifyInstanceMaintenanceOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIpamResourceCidrInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceMaintenanceOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39428,14 +39778,14 @@ func (m *awsEc2query_serializeOpModifyIpamResourceCidr) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIpamResourceDiscovery struct { +type awsEc2query_serializeOpModifyInstanceMetadataDefaults struct { } -func (*awsEc2query_serializeOpModifyIpamResourceDiscovery) ID() string { +func (*awsEc2query_serializeOpModifyInstanceMetadataDefaults) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIpamResourceDiscovery) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceMetadataDefaults) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39447,7 +39797,7 @@ func (m *awsEc2query_serializeOpModifyIpamResourceDiscovery) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIpamResourceDiscoveryInput) + input, ok := in.Parameters.(*ModifyInstanceMetadataDefaultsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39472,10 +39822,10 @@ func (m *awsEc2query_serializeOpModifyIpamResourceDiscovery) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIpamResourceDiscovery") + body.Key("Action").String("ModifyInstanceMetadataDefaults") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIpamResourceDiscoveryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceMetadataDefaultsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39498,14 +39848,14 @@ func (m *awsEc2query_serializeOpModifyIpamResourceDiscovery) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyIpamScope struct { +type awsEc2query_serializeOpModifyInstanceMetadataOptions struct { } -func (*awsEc2query_serializeOpModifyIpamScope) ID() string { +func (*awsEc2query_serializeOpModifyInstanceMetadataOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyIpamScope) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceMetadataOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39517,7 +39867,7 @@ func (m *awsEc2query_serializeOpModifyIpamScope) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyIpamScopeInput) + input, ok := in.Parameters.(*ModifyInstanceMetadataOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39542,10 +39892,10 @@ func (m *awsEc2query_serializeOpModifyIpamScope) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyIpamScope") + body.Key("Action").String("ModifyInstanceMetadataOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyIpamScopeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceMetadataOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39568,14 +39918,14 
@@ func (m *awsEc2query_serializeOpModifyIpamScope) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyLaunchTemplate struct { +type awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions struct { } -func (*awsEc2query_serializeOpModifyLaunchTemplate) ID() string { +func (*awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyLaunchTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstanceNetworkPerformanceOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39587,7 +39937,7 @@ func (m *awsEc2query_serializeOpModifyLaunchTemplate) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyLaunchTemplateInput) + input, ok := in.Parameters.(*ModifyInstanceNetworkPerformanceOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39612,10 +39962,10 @@ func (m *awsEc2query_serializeOpModifyLaunchTemplate) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyLaunchTemplate") + body.Key("Action").String("ModifyInstanceNetworkPerformanceOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyLaunchTemplateInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstanceNetworkPerformanceOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39638,14 +39988,14 @@ func (m *awsEc2query_serializeOpModifyLaunchTemplate) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyLocalGatewayRoute struct { +type awsEc2query_serializeOpModifyInstancePlacement struct { } -func (*awsEc2query_serializeOpModifyLocalGatewayRoute) ID() string { +func (*awsEc2query_serializeOpModifyInstancePlacement) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyLocalGatewayRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyInstancePlacement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39657,7 +40007,7 @@ func (m *awsEc2query_serializeOpModifyLocalGatewayRoute) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyLocalGatewayRouteInput) + input, ok := in.Parameters.(*ModifyInstancePlacementInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39682,10 +40032,10 @@ func (m *awsEc2query_serializeOpModifyLocalGatewayRoute) HandleSerialize(ctx con 
bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyLocalGatewayRoute") + body.Key("Action").String("ModifyInstancePlacement") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyLocalGatewayRouteInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyInstancePlacementInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39708,14 +40058,14 @@ func (m *awsEc2query_serializeOpModifyLocalGatewayRoute) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyManagedPrefixList struct { +type awsEc2query_serializeOpModifyIpam struct { } -func (*awsEc2query_serializeOpModifyManagedPrefixList) ID() string { +func (*awsEc2query_serializeOpModifyIpam) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyManagedPrefixList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIpam) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39727,7 +40077,7 @@ func (m *awsEc2query_serializeOpModifyManagedPrefixList) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyManagedPrefixListInput) + input, ok := in.Parameters.(*ModifyIpamInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39752,10 +40102,10 @@ func (m *awsEc2query_serializeOpModifyManagedPrefixList) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyManagedPrefixList") + body.Key("Action").String("ModifyIpam") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyManagedPrefixListInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIpamInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39778,14 +40128,14 @@ func (m *awsEc2query_serializeOpModifyManagedPrefixList) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyNetworkInterfaceAttribute struct { +type awsEc2query_serializeOpModifyIpamPool struct { } -func (*awsEc2query_serializeOpModifyNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_serializeOpModifyIpamPool) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIpamPool) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39797,7 +40147,7 @@ func (m *awsEc2query_serializeOpModifyNetworkInterfaceAttribute) HandleSerialize return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyNetworkInterfaceAttributeInput) + input, ok := in.Parameters.(*ModifyIpamPoolInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39822,10 +40172,10 @@ func (m *awsEc2query_serializeOpModifyNetworkInterfaceAttribute) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyNetworkInterfaceAttribute") + body.Key("Action").String("ModifyIpamPool") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIpamPoolInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39848,14 +40198,14 @@ func (m *awsEc2query_serializeOpModifyNetworkInterfaceAttribute) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyPrivateDnsNameOptions struct { +type awsEc2query_serializeOpModifyIpamResourceCidr struct { } -func (*awsEc2query_serializeOpModifyPrivateDnsNameOptions) ID() string { +func (*awsEc2query_serializeOpModifyIpamResourceCidr) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyPrivateDnsNameOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIpamResourceCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39867,7 +40217,7 @@ func (m *awsEc2query_serializeOpModifyPrivateDnsNameOptions) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyPrivateDnsNameOptionsInput) + input, ok := in.Parameters.(*ModifyIpamResourceCidrInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39892,10 +40242,10 @@ func (m *awsEc2query_serializeOpModifyPrivateDnsNameOptions) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyPrivateDnsNameOptions") + body.Key("Action").String("ModifyIpamResourceCidr") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyPrivateDnsNameOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIpamResourceCidrInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39918,14 +40268,14 @@ func (m *awsEc2query_serializeOpModifyPrivateDnsNameOptions) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyReservedInstances struct { +type awsEc2query_serializeOpModifyIpamResourceDiscovery struct { } -func (*awsEc2query_serializeOpModifyReservedInstances) ID() string { +func (*awsEc2query_serializeOpModifyIpamResourceDiscovery) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyReservedInstances) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIpamResourceDiscovery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -39937,7 +40287,7 @@ func (m *awsEc2query_serializeOpModifyReservedInstances) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyReservedInstancesInput) + input, ok := in.Parameters.(*ModifyIpamResourceDiscoveryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -39962,10 +40312,10 @@ func (m *awsEc2query_serializeOpModifyReservedInstances) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyReservedInstances") + body.Key("Action").String("ModifyIpamResourceDiscovery") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyReservedInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIpamResourceDiscoveryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -39988,14 +40338,14 @@ func (m *awsEc2query_serializeOpModifyReservedInstances) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyRouteServer struct { +type awsEc2query_serializeOpModifyIpamScope struct { } -func (*awsEc2query_serializeOpModifyRouteServer) ID() string { +func (*awsEc2query_serializeOpModifyIpamScope) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyRouteServer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyIpamScope) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40007,7 +40357,7 @@ func (m *awsEc2query_serializeOpModifyRouteServer) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyRouteServerInput) + input, ok := in.Parameters.(*ModifyIpamScopeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40032,10 +40382,10 @@ func (m *awsEc2query_serializeOpModifyRouteServer) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyRouteServer") + body.Key("Action").String("ModifyIpamScope") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyRouteServerInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyIpamScopeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40058,14 +40408,14 @@ func (m *awsEc2query_serializeOpModifyRouteServer) HandleSerialize(ctx context.C return 
next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifySecurityGroupRules struct { +type awsEc2query_serializeOpModifyLaunchTemplate struct { } -func (*awsEc2query_serializeOpModifySecurityGroupRules) ID() string { +func (*awsEc2query_serializeOpModifyLaunchTemplate) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifySecurityGroupRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyLaunchTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40077,7 +40427,7 @@ func (m *awsEc2query_serializeOpModifySecurityGroupRules) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifySecurityGroupRulesInput) + input, ok := in.Parameters.(*ModifyLaunchTemplateInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40102,10 +40452,10 @@ func (m *awsEc2query_serializeOpModifySecurityGroupRules) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifySecurityGroupRules") + body.Key("Action").String("ModifyLaunchTemplate") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifySecurityGroupRulesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyLaunchTemplateInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40128,14 +40478,14 @@ func (m *awsEc2query_serializeOpModifySecurityGroupRules) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifySnapshotAttribute struct { +type awsEc2query_serializeOpModifyLocalGatewayRoute struct { } -func (*awsEc2query_serializeOpModifySnapshotAttribute) ID() string { +func (*awsEc2query_serializeOpModifyLocalGatewayRoute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifySnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyLocalGatewayRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40147,7 +40497,7 @@ func (m *awsEc2query_serializeOpModifySnapshotAttribute) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifySnapshotAttributeInput) + input, ok := in.Parameters.(*ModifyLocalGatewayRouteInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40172,10 +40522,10 @@ func (m *awsEc2query_serializeOpModifySnapshotAttribute) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifySnapshotAttribute") + 
body.Key("Action").String("ModifyLocalGatewayRoute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifySnapshotAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyLocalGatewayRouteInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40198,14 +40548,14 @@ func (m *awsEc2query_serializeOpModifySnapshotAttribute) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifySnapshotTier struct { +type awsEc2query_serializeOpModifyManagedPrefixList struct { } -func (*awsEc2query_serializeOpModifySnapshotTier) ID() string { +func (*awsEc2query_serializeOpModifyManagedPrefixList) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifySnapshotTier) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyManagedPrefixList) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40217,7 +40567,7 @@ func (m *awsEc2query_serializeOpModifySnapshotTier) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifySnapshotTierInput) + input, ok := in.Parameters.(*ModifyManagedPrefixListInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40242,10 +40592,10 @@ func (m *awsEc2query_serializeOpModifySnapshotTier) HandleSerialize(ctx context. bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifySnapshotTier") + body.Key("Action").String("ModifyManagedPrefixList") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifySnapshotTierInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyManagedPrefixListInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40268,14 +40618,14 @@ func (m *awsEc2query_serializeOpModifySnapshotTier) HandleSerialize(ctx context. 
return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifySpotFleetRequest struct { +type awsEc2query_serializeOpModifyNetworkInterfaceAttribute struct { } -func (*awsEc2query_serializeOpModifySpotFleetRequest) ID() string { +func (*awsEc2query_serializeOpModifyNetworkInterfaceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifySpotFleetRequest) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40287,7 +40637,7 @@ func (m *awsEc2query_serializeOpModifySpotFleetRequest) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifySpotFleetRequestInput) + input, ok := in.Parameters.(*ModifyNetworkInterfaceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40312,10 +40662,10 @@ func (m *awsEc2query_serializeOpModifySpotFleetRequest) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifySpotFleetRequest") + body.Key("Action").String("ModifyNetworkInterfaceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifySpotFleetRequestInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40338,14 +40688,14 @@ func (m *awsEc2query_serializeOpModifySpotFleetRequest) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifySubnetAttribute struct { +type awsEc2query_serializeOpModifyPrivateDnsNameOptions struct { } -func (*awsEc2query_serializeOpModifySubnetAttribute) ID() string { +func (*awsEc2query_serializeOpModifyPrivateDnsNameOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifySubnetAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyPrivateDnsNameOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40357,7 +40707,7 @@ func (m *awsEc2query_serializeOpModifySubnetAttribute) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifySubnetAttributeInput) + input, ok := in.Parameters.(*ModifyPrivateDnsNameOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40382,10 +40732,10 @@ func (m *awsEc2query_serializeOpModifySubnetAttribute) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("ModifySubnetAttribute") + body.Key("Action").String("ModifyPrivateDnsNameOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifySubnetAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyPrivateDnsNameOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40408,14 +40758,14 @@ func (m *awsEc2query_serializeOpModifySubnetAttribute) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices struct { +type awsEc2query_serializeOpModifyPublicIpDnsNameOptions struct { } -func (*awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) ID() string { +func (*awsEc2query_serializeOpModifyPublicIpDnsNameOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyPublicIpDnsNameOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40427,7 +40777,7 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) Handle return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTrafficMirrorFilterNetworkServicesInput) + input, ok := in.Parameters.(*ModifyPublicIpDnsNameOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40452,10 +40802,10 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) Handle bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTrafficMirrorFilterNetworkServices") + body.Key("Action").String("ModifyPublicIpDnsNameOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorFilterNetworkServicesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyPublicIpDnsNameOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40478,14 +40828,14 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) Handle return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTrafficMirrorFilterRule struct { +type awsEc2query_serializeOpModifyReservedInstances struct { } -func (*awsEc2query_serializeOpModifyTrafficMirrorFilterRule) ID() string { +func (*awsEc2query_serializeOpModifyReservedInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyReservedInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40497,7 +40847,7 @@ func (m 
*awsEc2query_serializeOpModifyTrafficMirrorFilterRule) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTrafficMirrorFilterRuleInput) + input, ok := in.Parameters.(*ModifyReservedInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40522,10 +40872,10 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterRule) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTrafficMirrorFilterRule") + body.Key("Action").String("ModifyReservedInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorFilterRuleInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyReservedInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40548,14 +40898,14 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterRule) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTrafficMirrorSession struct { +type awsEc2query_serializeOpModifyRouteServer struct { } -func (*awsEc2query_serializeOpModifyTrafficMirrorSession) ID() string { +func (*awsEc2query_serializeOpModifyRouteServer) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyTrafficMirrorSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyRouteServer) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40567,7 +40917,7 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorSession) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTrafficMirrorSessionInput) + input, ok := in.Parameters.(*ModifyRouteServerInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40592,10 +40942,10 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorSession) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTrafficMirrorSession") + body.Key("Action").String("ModifyRouteServer") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorSessionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyRouteServerInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40618,14 +40968,14 @@ func (m *awsEc2query_serializeOpModifyTrafficMirrorSession) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTransitGateway struct { +type awsEc2query_serializeOpModifySecurityGroupRules struct { } -func (*awsEc2query_serializeOpModifyTransitGateway) ID() string { +func (*awsEc2query_serializeOpModifySecurityGroupRules) ID() string { return "OperationSerializer" } -func 
(m *awsEc2query_serializeOpModifyTransitGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifySecurityGroupRules) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40637,7 +40987,7 @@ func (m *awsEc2query_serializeOpModifyTransitGateway) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTransitGatewayInput) + input, ok := in.Parameters.(*ModifySecurityGroupRulesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40662,10 +41012,10 @@ func (m *awsEc2query_serializeOpModifyTransitGateway) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTransitGateway") + body.Key("Action").String("ModifySecurityGroupRules") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTransitGatewayInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifySecurityGroupRulesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40688,14 +41038,14 @@ func (m *awsEc2query_serializeOpModifyTransitGateway) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTransitGatewayPrefixListReference struct { +type awsEc2query_serializeOpModifySnapshotAttribute struct { } -func (*awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) ID() string { +func (*awsEc2query_serializeOpModifySnapshotAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifySnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40707,7 +41057,7 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTransitGatewayPrefixListReferenceInput) + input, ok := in.Parameters.(*ModifySnapshotAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40732,10 +41082,10 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTransitGatewayPrefixListReference") + body.Key("Action").String("ModifySnapshotAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTransitGatewayPrefixListReferenceInput(input, bodyEncoder.Value); err != nil { + if err := 
awsEc2query_serializeOpDocumentModifySnapshotAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40758,14 +41108,14 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyTransitGatewayVpcAttachment struct { +type awsEc2query_serializeOpModifySnapshotTier struct { } -func (*awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_serializeOpModifySnapshotTier) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifySnapshotTier) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40777,7 +41127,7 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyTransitGatewayVpcAttachmentInput) + input, ok := in.Parameters.(*ModifySnapshotTierInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40802,10 +41152,10 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyTransitGatewayVpcAttachment") + body.Key("Action").String("ModifySnapshotTier") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyTransitGatewayVpcAttachmentInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifySnapshotTierInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40828,14 +41178,14 @@ func (m *awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessEndpoint struct { +type awsEc2query_serializeOpModifySpotFleetRequest struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessEndpoint) ID() string { +func (*awsEc2query_serializeOpModifySpotFleetRequest) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifySpotFleetRequest) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40847,7 +41197,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpoint) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessEndpointInput) + input, ok := in.Parameters.(*ModifySpotFleetRequestInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40872,10 +41222,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpoint) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessEndpoint") + body.Key("Action").String("ModifySpotFleetRequest") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessEndpointInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifySpotFleetRequestInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40898,14 +41248,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpoint) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy struct { +type awsEc2query_serializeOpModifySubnetAttribute struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) ID() string { +func (*awsEc2query_serializeOpModifySubnetAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifySubnetAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40917,7 +41267,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) HandleSerial return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessEndpointPolicyInput) + input, ok := in.Parameters.(*ModifySubnetAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -40942,10 +41292,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) HandleSerial bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessEndpointPolicy") + body.Key("Action").String("ModifySubnetAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessEndpointPolicyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifySubnetAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -40968,14 +41318,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) HandleSerial return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessGroup struct { +type awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessGroup) ID() string { +func (*awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterNetworkServices) HandleSerialize(ctx context.Context, 
in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -40987,7 +41337,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessGroup) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessGroupInput) + input, ok := in.Parameters.(*ModifyTrafficMirrorFilterNetworkServicesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41012,10 +41362,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessGroup) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessGroup") + body.Key("Action").String("ModifyTrafficMirrorFilterNetworkServices") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessGroupInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorFilterNetworkServicesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41038,14 +41388,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessGroup) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy struct { +type awsEc2query_serializeOpModifyTrafficMirrorFilterRule struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) ID() string { +func (*awsEc2query_serializeOpModifyTrafficMirrorFilterRule) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTrafficMirrorFilterRule) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41057,7 +41407,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessGroupPolicyInput) + input, ok := in.Parameters.(*ModifyTrafficMirrorFilterRuleInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41082,10 +41432,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessGroupPolicy") + body.Key("Action").String("ModifyTrafficMirrorFilterRule") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessGroupPolicyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorFilterRuleInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41108,14 +41458,14 @@ func (m 
*awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) HandleSerialize return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessInstance struct { +type awsEc2query_serializeOpModifyTrafficMirrorSession struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessInstance) ID() string { +func (*awsEc2query_serializeOpModifyTrafficMirrorSession) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTrafficMirrorSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41127,7 +41477,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessInstance) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessInstanceInput) + input, ok := in.Parameters.(*ModifyTrafficMirrorSessionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41152,10 +41502,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessInstance) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessInstance") + body.Key("Action").String("ModifyTrafficMirrorSession") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessInstanceInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTrafficMirrorSessionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41178,14 +41528,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessInstance) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration struct { +type awsEc2query_serializeOpModifyTransitGateway struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration) ID() string { +func (*awsEc2query_serializeOpModifyTransitGateway) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTransitGateway) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41197,7 +41547,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessInstanceLoggingConfigurationInput) + input, ok := in.Parameters.(*ModifyTransitGatewayInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41222,10 +41572,10 @@ func (m 
*awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessInstanceLoggingConfiguration") + body.Key("Action").String("ModifyTransitGateway") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessInstanceLoggingConfigurationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTransitGatewayInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41248,14 +41598,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVerifiedAccessTrustProvider struct { +type awsEc2query_serializeOpModifyTransitGatewayPrefixListReference struct { } -func (*awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) ID() string { +func (*awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTransitGatewayPrefixListReference) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41267,7 +41617,7 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVerifiedAccessTrustProviderInput) + input, ok := in.Parameters.(*ModifyTransitGatewayPrefixListReferenceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41292,10 +41642,10 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVerifiedAccessTrustProvider") + body.Key("Action").String("ModifyTransitGatewayPrefixListReference") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessTrustProviderInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTransitGatewayPrefixListReferenceInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41318,14 +41668,14 @@ func (m *awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVolume struct { +type awsEc2query_serializeOpModifyTransitGatewayVpcAttachment struct { } -func (*awsEc2query_serializeOpModifyVolume) ID() string { +func (*awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyTransitGatewayVpcAttachment) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41337,7 +41687,7 @@ func (m *awsEc2query_serializeOpModifyVolume) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVolumeInput) + input, ok := in.Parameters.(*ModifyTransitGatewayVpcAttachmentInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41362,10 +41712,10 @@ func (m *awsEc2query_serializeOpModifyVolume) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVolume") + body.Key("Action").String("ModifyTransitGatewayVpcAttachment") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVolumeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyTransitGatewayVpcAttachmentInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41388,14 +41738,14 @@ func (m *awsEc2query_serializeOpModifyVolume) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVolumeAttribute struct { +type awsEc2query_serializeOpModifyVerifiedAccessEndpoint struct { } -func (*awsEc2query_serializeOpModifyVolumeAttribute) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessEndpoint) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVolumeAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41407,7 +41757,7 @@ func (m *awsEc2query_serializeOpModifyVolumeAttribute) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVolumeAttributeInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessEndpointInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41432,10 +41782,10 @@ func (m *awsEc2query_serializeOpModifyVolumeAttribute) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVolumeAttribute") + body.Key("Action").String("ModifyVerifiedAccessEndpoint") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVolumeAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessEndpointInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41458,14 +41808,14 @@ func (m *awsEc2query_serializeOpModifyVolumeAttribute) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcAttribute 
struct { +type awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy struct { } -func (*awsEc2query_serializeOpModifyVpcAttribute) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessEndpointPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41477,7 +41827,7 @@ func (m *awsEc2query_serializeOpModifyVpcAttribute) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcAttributeInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessEndpointPolicyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41502,10 +41852,10 @@ func (m *awsEc2query_serializeOpModifyVpcAttribute) HandleSerialize(ctx context. bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcAttribute") + body.Key("Action").String("ModifyVerifiedAccessEndpointPolicy") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessEndpointPolicyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41528,14 +41878,14 @@ func (m *awsEc2query_serializeOpModifyVpcAttribute) HandleSerialize(ctx context. 
return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion struct { +type awsEc2query_serializeOpModifyVerifiedAccessGroup struct { } -func (*awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessGroup) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41547,7 +41897,7 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcBlockPublicAccessExclusionInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessGroupInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41572,10 +41922,10 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcBlockPublicAccessExclusion") + body.Key("Action").String("ModifyVerifiedAccessGroup") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcBlockPublicAccessExclusionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessGroupInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41598,14 +41948,14 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions struct { +type awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy struct { } -func (*awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessGroupPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41617,7 +41967,7 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcBlockPublicAccessOptionsInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessGroupPolicyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41642,10 +41992,10 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) HandleSeriali 
bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcBlockPublicAccessOptions") + body.Key("Action").String("ModifyVerifiedAccessGroupPolicy") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcBlockPublicAccessOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessGroupPolicyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41668,14 +42018,14 @@ func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcEndpoint struct { +type awsEc2query_serializeOpModifyVerifiedAccessInstance struct { } -func (*awsEc2query_serializeOpModifyVpcEndpoint) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessInstance) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessInstance) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41687,7 +42037,7 @@ func (m *awsEc2query_serializeOpModifyVpcEndpoint) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcEndpointInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessInstanceInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41712,10 +42062,10 @@ func (m *awsEc2query_serializeOpModifyVpcEndpoint) HandleSerialize(ctx context.C bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcEndpoint") + body.Key("Action").String("ModifyVerifiedAccessInstance") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcEndpointInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessInstanceInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41738,14 +42088,14 @@ func (m *awsEc2query_serializeOpModifyVpcEndpoint) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcEndpointConnectionNotification struct { +type awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration struct { } -func (*awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessInstanceLoggingConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err 
error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41757,7 +42107,7 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcEndpointConnectionNotificationInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessInstanceLoggingConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41782,10 +42132,10 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) HandleS bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcEndpointConnectionNotification") + body.Key("Action").String("ModifyVerifiedAccessInstanceLoggingConfiguration") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcEndpointConnectionNotificationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessInstanceLoggingConfigurationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41808,14 +42158,14 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) HandleS return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration struct { +type awsEc2query_serializeOpModifyVerifiedAccessTrustProvider struct { } -func (*awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) ID() string { +func (*awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVerifiedAccessTrustProvider) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41827,7 +42177,7 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcEndpointServiceConfigurationInput) + input, ok := in.Parameters.(*ModifyVerifiedAccessTrustProviderInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41852,10 +42202,10 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcEndpointServiceConfiguration") + body.Key("Action").String("ModifyVerifiedAccessTrustProvider") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServiceConfigurationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVerifiedAccessTrustProviderInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41878,14 +42228,14 @@ func (m 
*awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility struct { +type awsEc2query_serializeOpModifyVolume struct { } -func (*awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) ID() string { +func (*awsEc2query_serializeOpModifyVolume) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVolume) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41897,7 +42247,7 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcEndpointServicePayerResponsibilityInput) + input, ok := in.Parameters.(*ModifyVolumeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41922,10 +42272,10 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcEndpointServicePayerResponsibility") + body.Key("Action").String("ModifyVolume") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServicePayerResponsibilityInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVolumeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -41948,14 +42298,14 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcEndpointServicePermissions struct { +type awsEc2query_serializeOpModifyVolumeAttribute struct { } -func (*awsEc2query_serializeOpModifyVpcEndpointServicePermissions) ID() string { +func (*awsEc2query_serializeOpModifyVolumeAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcEndpointServicePermissions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVolumeAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -41967,7 +42317,7 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePermissions) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcEndpointServicePermissionsInput) + input, ok := in.Parameters.(*ModifyVolumeAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -41992,10 +42342,10 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePermissions) 
HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcEndpointServicePermissions") + body.Key("Action").String("ModifyVolumeAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServicePermissionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVolumeAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42018,14 +42368,14 @@ func (m *awsEc2query_serializeOpModifyVpcEndpointServicePermissions) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcPeeringConnectionOptions struct { +type awsEc2query_serializeOpModifyVpcAttribute struct { } -func (*awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) ID() string { +func (*awsEc2query_serializeOpModifyVpcAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42037,7 +42387,7 @@ func (m *awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcPeeringConnectionOptionsInput) + input, ok := in.Parameters.(*ModifyVpcAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42062,10 +42412,10 @@ func (m *awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcPeeringConnectionOptions") + body.Key("Action").String("ModifyVpcAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcPeeringConnectionOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42088,14 +42438,14 @@ func (m *awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpcTenancy struct { +type awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion struct { } -func (*awsEc2query_serializeOpModifyVpcTenancy) ID() string { +func (*awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpcTenancy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessExclusion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42107,7 
+42457,7 @@ func (m *awsEc2query_serializeOpModifyVpcTenancy) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpcTenancyInput) + input, ok := in.Parameters.(*ModifyVpcBlockPublicAccessExclusionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42132,10 +42482,10 @@ func (m *awsEc2query_serializeOpModifyVpcTenancy) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpcTenancy") + body.Key("Action").String("ModifyVpcBlockPublicAccessExclusion") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpcTenancyInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcBlockPublicAccessExclusionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42158,14 +42508,14 @@ func (m *awsEc2query_serializeOpModifyVpcTenancy) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpnConnection struct { +type awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions struct { } -func (*awsEc2query_serializeOpModifyVpnConnection) ID() string { +func (*awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpnConnection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcBlockPublicAccessOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42177,7 +42527,7 @@ func (m *awsEc2query_serializeOpModifyVpnConnection) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpnConnectionInput) + input, ok := in.Parameters.(*ModifyVpcBlockPublicAccessOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42202,10 +42552,10 @@ func (m *awsEc2query_serializeOpModifyVpnConnection) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpnConnection") + body.Key("Action").String("ModifyVpcBlockPublicAccessOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpnConnectionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcBlockPublicAccessOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42228,14 +42578,14 @@ func (m *awsEc2query_serializeOpModifyVpnConnection) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpnConnectionOptions struct { +type awsEc2query_serializeOpModifyVpcEndpoint struct { } -func (*awsEc2query_serializeOpModifyVpnConnectionOptions) ID() string { +func 
(*awsEc2query_serializeOpModifyVpcEndpoint) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpnConnectionOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42247,7 +42597,7 @@ func (m *awsEc2query_serializeOpModifyVpnConnectionOptions) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpnConnectionOptionsInput) + input, ok := in.Parameters.(*ModifyVpcEndpointInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42272,10 +42622,10 @@ func (m *awsEc2query_serializeOpModifyVpnConnectionOptions) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpnConnectionOptions") + body.Key("Action").String("ModifyVpcEndpoint") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpnConnectionOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcEndpointInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42298,14 +42648,14 @@ func (m *awsEc2query_serializeOpModifyVpnConnectionOptions) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpnTunnelCertificate struct { +type awsEc2query_serializeOpModifyVpcEndpointConnectionNotification struct { } -func (*awsEc2query_serializeOpModifyVpnTunnelCertificate) ID() string { +func (*awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpnTunnelCertificate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcEndpointConnectionNotification) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42317,7 +42667,7 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelCertificate) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpnTunnelCertificateInput) + input, ok := in.Parameters.(*ModifyVpcEndpointConnectionNotificationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42342,10 +42692,10 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelCertificate) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpnTunnelCertificate") + body.Key("Action").String("ModifyVpcEndpointConnectionNotification") body.Key("Version").String("2016-11-15") - if err := 
awsEc2query_serializeOpDocumentModifyVpnTunnelCertificateInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcEndpointConnectionNotificationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42368,14 +42718,14 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelCertificate) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpModifyVpnTunnelOptions struct { +type awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration struct { } -func (*awsEc2query_serializeOpModifyVpnTunnelOptions) ID() string { +func (*awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpModifyVpnTunnelOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcEndpointServiceConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42387,7 +42737,7 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelOptions) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ModifyVpnTunnelOptionsInput) + input, ok := in.Parameters.(*ModifyVpcEndpointServiceConfigurationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42412,10 +42762,10 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelOptions) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ModifyVpnTunnelOptions") + body.Key("Action").String("ModifyVpcEndpointServiceConfiguration") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentModifyVpnTunnelOptionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServiceConfigurationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42438,14 +42788,14 @@ func (m *awsEc2query_serializeOpModifyVpnTunnelOptions) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpMonitorInstances struct { +type awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility struct { } -func (*awsEc2query_serializeOpMonitorInstances) ID() string { +func (*awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpMonitorInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcEndpointServicePayerResponsibility) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42457,7 +42807,7 @@ func (m *awsEc2query_serializeOpMonitorInstances) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok 
:= in.Parameters.(*MonitorInstancesInput) + input, ok := in.Parameters.(*ModifyVpcEndpointServicePayerResponsibilityInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42482,10 +42832,10 @@ func (m *awsEc2query_serializeOpMonitorInstances) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("MonitorInstances") + body.Key("Action").String("ModifyVpcEndpointServicePayerResponsibility") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentMonitorInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServicePayerResponsibilityInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42508,14 +42858,14 @@ func (m *awsEc2query_serializeOpMonitorInstances) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpMoveAddressToVpc struct { +type awsEc2query_serializeOpModifyVpcEndpointServicePermissions struct { } -func (*awsEc2query_serializeOpMoveAddressToVpc) ID() string { +func (*awsEc2query_serializeOpModifyVpcEndpointServicePermissions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpMoveAddressToVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcEndpointServicePermissions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42527,7 +42877,7 @@ func (m *awsEc2query_serializeOpMoveAddressToVpc) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*MoveAddressToVpcInput) + input, ok := in.Parameters.(*ModifyVpcEndpointServicePermissionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42552,10 +42902,10 @@ func (m *awsEc2query_serializeOpMoveAddressToVpc) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("MoveAddressToVpc") + body.Key("Action").String("ModifyVpcEndpointServicePermissions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentMoveAddressToVpcInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcEndpointServicePermissionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42578,14 +42928,14 @@ func (m *awsEc2query_serializeOpMoveAddressToVpc) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpMoveByoipCidrToIpam struct { +type awsEc2query_serializeOpModifyVpcPeeringConnectionOptions struct { } -func (*awsEc2query_serializeOpMoveByoipCidrToIpam) ID() string { +func (*awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpMoveByoipCidrToIpam) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcPeeringConnectionOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42597,7 +42947,7 @@ func (m *awsEc2query_serializeOpMoveByoipCidrToIpam) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*MoveByoipCidrToIpamInput) + input, ok := in.Parameters.(*ModifyVpcPeeringConnectionOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42622,10 +42972,10 @@ func (m *awsEc2query_serializeOpMoveByoipCidrToIpam) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("MoveByoipCidrToIpam") + body.Key("Action").String("ModifyVpcPeeringConnectionOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentMoveByoipCidrToIpamInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcPeeringConnectionOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42648,14 +42998,14 @@ func (m *awsEc2query_serializeOpMoveByoipCidrToIpam) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpMoveCapacityReservationInstances struct { +type awsEc2query_serializeOpModifyVpcTenancy struct { } -func (*awsEc2query_serializeOpMoveCapacityReservationInstances) ID() string { +func (*awsEc2query_serializeOpModifyVpcTenancy) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpMoveCapacityReservationInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpcTenancy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42667,7 +43017,7 @@ func (m *awsEc2query_serializeOpMoveCapacityReservationInstances) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*MoveCapacityReservationInstancesInput) + input, ok := in.Parameters.(*ModifyVpcTenancyInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42692,10 +43042,10 @@ func (m *awsEc2query_serializeOpMoveCapacityReservationInstances) HandleSerializ bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("MoveCapacityReservationInstances") + body.Key("Action").String("ModifyVpcTenancy") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentMoveCapacityReservationInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpcTenancyInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ 
-42718,14 +43068,14 @@ func (m *awsEc2query_serializeOpMoveCapacityReservationInstances) HandleSerializ return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpProvisionByoipCidr struct { +type awsEc2query_serializeOpModifyVpnConnection struct { } -func (*awsEc2query_serializeOpProvisionByoipCidr) ID() string { +func (*awsEc2query_serializeOpModifyVpnConnection) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpProvisionByoipCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpnConnection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42737,7 +43087,7 @@ func (m *awsEc2query_serializeOpProvisionByoipCidr) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ProvisionByoipCidrInput) + input, ok := in.Parameters.(*ModifyVpnConnectionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42762,10 +43112,10 @@ func (m *awsEc2query_serializeOpProvisionByoipCidr) HandleSerialize(ctx context. bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ProvisionByoipCidr") + body.Key("Action").String("ModifyVpnConnection") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentProvisionByoipCidrInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpnConnectionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42788,14 +43138,14 @@ func (m *awsEc2query_serializeOpProvisionByoipCidr) HandleSerialize(ctx context. 
return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpProvisionIpamByoasn struct { +type awsEc2query_serializeOpModifyVpnConnectionOptions struct { } -func (*awsEc2query_serializeOpProvisionIpamByoasn) ID() string { +func (*awsEc2query_serializeOpModifyVpnConnectionOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpProvisionIpamByoasn) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpnConnectionOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42807,7 +43157,7 @@ func (m *awsEc2query_serializeOpProvisionIpamByoasn) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ProvisionIpamByoasnInput) + input, ok := in.Parameters.(*ModifyVpnConnectionOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42832,10 +43182,10 @@ func (m *awsEc2query_serializeOpProvisionIpamByoasn) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ProvisionIpamByoasn") + body.Key("Action").String("ModifyVpnConnectionOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentProvisionIpamByoasnInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpnConnectionOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42858,14 +43208,14 @@ func (m *awsEc2query_serializeOpProvisionIpamByoasn) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpProvisionIpamPoolCidr struct { +type awsEc2query_serializeOpModifyVpnTunnelCertificate struct { } -func (*awsEc2query_serializeOpProvisionIpamPoolCidr) ID() string { +func (*awsEc2query_serializeOpModifyVpnTunnelCertificate) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpProvisionIpamPoolCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpnTunnelCertificate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42877,7 +43227,7 @@ func (m *awsEc2query_serializeOpProvisionIpamPoolCidr) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ProvisionIpamPoolCidrInput) + input, ok := in.Parameters.(*ModifyVpnTunnelCertificateInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42902,10 +43252,10 @@ func (m *awsEc2query_serializeOpProvisionIpamPoolCidr) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ProvisionIpamPoolCidr") + 
body.Key("Action").String("ModifyVpnTunnelCertificate") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentProvisionIpamPoolCidrInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpnTunnelCertificateInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42928,14 +43278,14 @@ func (m *awsEc2query_serializeOpProvisionIpamPoolCidr) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpProvisionPublicIpv4PoolCidr struct { +type awsEc2query_serializeOpModifyVpnTunnelOptions struct { } -func (*awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) ID() string { +func (*awsEc2query_serializeOpModifyVpnTunnelOptions) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpModifyVpnTunnelOptions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -42947,7 +43297,7 @@ func (m *awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ProvisionPublicIpv4PoolCidrInput) + input, ok := in.Parameters.(*ModifyVpnTunnelOptionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -42972,10 +43322,10 @@ func (m *awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ProvisionPublicIpv4PoolCidr") + body.Key("Action").String("ModifyVpnTunnelOptions") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentProvisionPublicIpv4PoolCidrInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentModifyVpnTunnelOptionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -42998,14 +43348,14 @@ func (m *awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpPurchaseCapacityBlock struct { +type awsEc2query_serializeOpMonitorInstances struct { } -func (*awsEc2query_serializeOpPurchaseCapacityBlock) ID() string { +func (*awsEc2query_serializeOpMonitorInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpPurchaseCapacityBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpMonitorInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43017,7 +43367,7 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlock) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*PurchaseCapacityBlockInput) + input, ok := in.Parameters.(*MonitorInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43042,10 +43392,10 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlock) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("PurchaseCapacityBlock") + body.Key("Action").String("MonitorInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentPurchaseCapacityBlockInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentMonitorInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43068,14 +43418,14 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlock) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpPurchaseCapacityBlockExtension struct { +type awsEc2query_serializeOpMoveAddressToVpc struct { } -func (*awsEc2query_serializeOpPurchaseCapacityBlockExtension) ID() string { +func (*awsEc2query_serializeOpMoveAddressToVpc) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpPurchaseCapacityBlockExtension) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpMoveAddressToVpc) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43087,7 +43437,7 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlockExtension) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PurchaseCapacityBlockExtensionInput) + input, ok := in.Parameters.(*MoveAddressToVpcInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43112,10 +43462,10 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlockExtension) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("PurchaseCapacityBlockExtension") + body.Key("Action").String("MoveAddressToVpc") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentPurchaseCapacityBlockExtensionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentMoveAddressToVpcInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43138,14 +43488,14 @@ func (m *awsEc2query_serializeOpPurchaseCapacityBlockExtension) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpPurchaseHostReservation struct { +type awsEc2query_serializeOpMoveByoipCidrToIpam struct { } -func (*awsEc2query_serializeOpPurchaseHostReservation) ID() string { +func (*awsEc2query_serializeOpMoveByoipCidrToIpam) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpPurchaseHostReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpMoveByoipCidrToIpam) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43157,7 +43507,7 @@ func (m *awsEc2query_serializeOpPurchaseHostReservation) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PurchaseHostReservationInput) + input, ok := in.Parameters.(*MoveByoipCidrToIpamInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43182,10 +43532,10 @@ func (m *awsEc2query_serializeOpPurchaseHostReservation) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("PurchaseHostReservation") + body.Key("Action").String("MoveByoipCidrToIpam") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentPurchaseHostReservationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentMoveByoipCidrToIpamInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43208,14 +43558,14 @@ func (m *awsEc2query_serializeOpPurchaseHostReservation) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpPurchaseReservedInstancesOffering struct { +type awsEc2query_serializeOpMoveCapacityReservationInstances struct { } -func (*awsEc2query_serializeOpPurchaseReservedInstancesOffering) ID() string { +func (*awsEc2query_serializeOpMoveCapacityReservationInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpPurchaseReservedInstancesOffering) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpMoveCapacityReservationInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43227,7 +43577,7 @@ func (m *awsEc2query_serializeOpPurchaseReservedInstancesOffering) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PurchaseReservedInstancesOfferingInput) + input, ok := in.Parameters.(*MoveCapacityReservationInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43252,10 +43602,10 @@ func (m *awsEc2query_serializeOpPurchaseReservedInstancesOffering) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("PurchaseReservedInstancesOffering") + body.Key("Action").String("MoveCapacityReservationInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentPurchaseReservedInstancesOfferingInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentMoveCapacityReservationInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43278,14 +43628,14 @@ func (m 
*awsEc2query_serializeOpPurchaseReservedInstancesOffering) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpPurchaseScheduledInstances struct { +type awsEc2query_serializeOpProvisionByoipCidr struct { } -func (*awsEc2query_serializeOpPurchaseScheduledInstances) ID() string { +func (*awsEc2query_serializeOpProvisionByoipCidr) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpPurchaseScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpProvisionByoipCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43297,7 +43647,7 @@ func (m *awsEc2query_serializeOpPurchaseScheduledInstances) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*PurchaseScheduledInstancesInput) + input, ok := in.Parameters.(*ProvisionByoipCidrInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43322,10 +43672,10 @@ func (m *awsEc2query_serializeOpPurchaseScheduledInstances) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("PurchaseScheduledInstances") + body.Key("Action").String("ProvisionByoipCidr") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentPurchaseScheduledInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentProvisionByoipCidrInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43348,14 +43698,14 @@ func (m *awsEc2query_serializeOpPurchaseScheduledInstances) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRebootInstances struct { +type awsEc2query_serializeOpProvisionIpamByoasn struct { } -func (*awsEc2query_serializeOpRebootInstances) ID() string { +func (*awsEc2query_serializeOpProvisionIpamByoasn) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRebootInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpProvisionIpamByoasn) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43367,7 +43717,7 @@ func (m *awsEc2query_serializeOpRebootInstances) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RebootInstancesInput) + input, ok := in.Parameters.(*ProvisionIpamByoasnInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43392,10 +43742,10 @@ func (m *awsEc2query_serializeOpRebootInstances) HandleSerialize(ctx context.Con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("RebootInstances") + body.Key("Action").String("ProvisionIpamByoasn") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRebootInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentProvisionIpamByoasnInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43418,14 +43768,14 @@ func (m *awsEc2query_serializeOpRebootInstances) HandleSerialize(ctx context.Con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRegisterImage struct { +type awsEc2query_serializeOpProvisionIpamPoolCidr struct { } -func (*awsEc2query_serializeOpRegisterImage) ID() string { +func (*awsEc2query_serializeOpProvisionIpamPoolCidr) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRegisterImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpProvisionIpamPoolCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43437,7 +43787,7 @@ func (m *awsEc2query_serializeOpRegisterImage) HandleSerialize(ctx context.Conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RegisterImageInput) + input, ok := in.Parameters.(*ProvisionIpamPoolCidrInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43462,10 +43812,10 @@ func (m *awsEc2query_serializeOpRegisterImage) HandleSerialize(ctx context.Conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RegisterImage") + body.Key("Action").String("ProvisionIpamPoolCidr") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRegisterImageInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentProvisionIpamPoolCidrInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43488,14 +43838,14 @@ func (m *awsEc2query_serializeOpRegisterImage) HandleSerialize(ctx context.Conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes struct { +type awsEc2query_serializeOpProvisionPublicIpv4PoolCidr struct { } -func (*awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) ID() string { +func (*awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpProvisionPublicIpv4PoolCidr) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43507,7 +43857,7 @@ func (m *awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, 
ok := in.Parameters.(*RegisterInstanceEventNotificationAttributesInput) + input, ok := in.Parameters.(*ProvisionPublicIpv4PoolCidrInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43532,10 +43882,10 @@ func (m *awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RegisterInstanceEventNotificationAttributes") + body.Key("Action").String("ProvisionPublicIpv4PoolCidr") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRegisterInstanceEventNotificationAttributesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentProvisionPublicIpv4PoolCidrInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43558,14 +43908,14 @@ func (m *awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers struct { +type awsEc2query_serializeOpPurchaseCapacityBlock struct { } -func (*awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) ID() string { +func (*awsEc2query_serializeOpPurchaseCapacityBlock) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpPurchaseCapacityBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43577,7 +43927,7 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RegisterTransitGatewayMulticastGroupMembersInput) + input, ok := in.Parameters.(*PurchaseCapacityBlockInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43602,10 +43952,10 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RegisterTransitGatewayMulticastGroupMembers") + body.Key("Action").String("PurchaseCapacityBlock") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRegisterTransitGatewayMulticastGroupMembersInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentPurchaseCapacityBlockInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43628,14 +43978,14 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources struct { +type awsEc2query_serializeOpPurchaseCapacityBlockExtension struct { } -func (*awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) ID() string { +func (*awsEc2query_serializeOpPurchaseCapacityBlockExtension) 
ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpPurchaseCapacityBlockExtension) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43647,7 +43997,7 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RegisterTransitGatewayMulticastGroupSourcesInput) + input, ok := in.Parameters.(*PurchaseCapacityBlockExtensionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43672,10 +44022,10 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RegisterTransitGatewayMulticastGroupSources") + body.Key("Action").String("PurchaseCapacityBlockExtension") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRegisterTransitGatewayMulticastGroupSourcesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentPurchaseCapacityBlockExtensionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43698,14 +44048,14 @@ func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectCapacityReservationBillingOwnership struct { +type awsEc2query_serializeOpPurchaseHostReservation struct { } -func (*awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) ID() string { +func (*awsEc2query_serializeOpPurchaseHostReservation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpPurchaseHostReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43717,7 +44067,7 @@ func (m *awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) Handl return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectCapacityReservationBillingOwnershipInput) + input, ok := in.Parameters.(*PurchaseHostReservationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43742,10 +44092,10 @@ func (m *awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) Handl bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectCapacityReservationBillingOwnership") + body.Key("Action").String("PurchaseHostReservation") 
body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRejectCapacityReservationBillingOwnershipInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentPurchaseHostReservationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43768,14 +44118,14 @@ func (m *awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) Handl return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations struct { +type awsEc2query_serializeOpPurchaseReservedInstancesOffering struct { } -func (*awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) ID() string { +func (*awsEc2query_serializeOpPurchaseReservedInstancesOffering) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpPurchaseReservedInstancesOffering) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43787,7 +44137,7 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectTransitGatewayMulticastDomainAssociationsInput) + input, ok := in.Parameters.(*PurchaseReservedInstancesOfferingInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43812,10 +44162,10 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectTransitGatewayMulticastDomainAssociations") + body.Key("Action").String("PurchaseReservedInstancesOffering") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRejectTransitGatewayMulticastDomainAssociationsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentPurchaseReservedInstancesOfferingInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43838,14 +44188,14 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment struct { +type awsEc2query_serializeOpPurchaseScheduledInstances struct { } -func (*awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) ID() string { +func (*awsEc2query_serializeOpPurchaseScheduledInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpPurchaseScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43857,7 +44207,7 @@ func (m 
*awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) HandleSer return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectTransitGatewayPeeringAttachmentInput) + input, ok := in.Parameters.(*PurchaseScheduledInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43882,10 +44232,10 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) HandleSer bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectTransitGatewayPeeringAttachment") + body.Key("Action").String("PurchaseScheduledInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRejectTransitGatewayPeeringAttachmentInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentPurchaseScheduledInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43908,14 +44258,14 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) HandleSer return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectTransitGatewayVpcAttachment struct { +type awsEc2query_serializeOpRebootInstances struct { } -func (*awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) ID() string { +func (*awsEc2query_serializeOpRebootInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRebootInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43927,7 +44277,7 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectTransitGatewayVpcAttachmentInput) + input, ok := in.Parameters.(*RebootInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -43952,10 +44302,10 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) HandleSeriali bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectTransitGatewayVpcAttachment") + body.Key("Action").String("RebootInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRejectTransitGatewayVpcAttachmentInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRebootInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -43978,14 +44328,14 @@ func (m *awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) HandleSeriali return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectVpcEndpointConnections struct { +type awsEc2query_serializeOpRegisterImage struct { } -func (*awsEc2query_serializeOpRejectVpcEndpointConnections) ID() string { +func 
(*awsEc2query_serializeOpRegisterImage) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectVpcEndpointConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRegisterImage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -43997,7 +44347,7 @@ func (m *awsEc2query_serializeOpRejectVpcEndpointConnections) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectVpcEndpointConnectionsInput) + input, ok := in.Parameters.(*RegisterImageInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44022,10 +44372,10 @@ func (m *awsEc2query_serializeOpRejectVpcEndpointConnections) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectVpcEndpointConnections") + body.Key("Action").String("RegisterImage") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRejectVpcEndpointConnectionsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRegisterImageInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44048,14 +44398,14 @@ func (m *awsEc2query_serializeOpRejectVpcEndpointConnections) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRejectVpcPeeringConnection struct { +type awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes struct { } -func (*awsEc2query_serializeOpRejectVpcPeeringConnection) ID() string { +func (*awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRejectVpcPeeringConnection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRegisterInstanceEventNotificationAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44067,7 +44417,7 @@ func (m *awsEc2query_serializeOpRejectVpcPeeringConnection) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RejectVpcPeeringConnectionInput) + input, ok := in.Parameters.(*RegisterInstanceEventNotificationAttributesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44092,10 +44442,10 @@ func (m *awsEc2query_serializeOpRejectVpcPeeringConnection) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RejectVpcPeeringConnection") + body.Key("Action").String("RegisterInstanceEventNotificationAttributes") body.Key("Version").String("2016-11-15") - if err := 
awsEc2query_serializeOpDocumentRejectVpcPeeringConnectionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRegisterInstanceEventNotificationAttributesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44118,14 +44468,14 @@ func (m *awsEc2query_serializeOpRejectVpcPeeringConnection) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReleaseAddress struct { +type awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers struct { } -func (*awsEc2query_serializeOpReleaseAddress) ID() string { +func (*awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReleaseAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupMembers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44137,7 +44487,7 @@ func (m *awsEc2query_serializeOpReleaseAddress) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReleaseAddressInput) + input, ok := in.Parameters.(*RegisterTransitGatewayMulticastGroupMembersInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44162,10 +44512,10 @@ func (m *awsEc2query_serializeOpReleaseAddress) HandleSerialize(ctx context.Cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReleaseAddress") + body.Key("Action").String("RegisterTransitGatewayMulticastGroupMembers") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReleaseAddressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRegisterTransitGatewayMulticastGroupMembersInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44188,14 +44538,14 @@ func (m *awsEc2query_serializeOpReleaseAddress) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReleaseHosts struct { +type awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources struct { } -func (*awsEc2query_serializeOpReleaseHosts) ID() string { +func (*awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReleaseHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRegisterTransitGatewayMulticastGroupSources) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44207,7 +44557,7 @@ func (m *awsEc2query_serializeOpReleaseHosts) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*ReleaseHostsInput) + input, ok := in.Parameters.(*RegisterTransitGatewayMulticastGroupSourcesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44232,10 +44582,10 @@ func (m *awsEc2query_serializeOpReleaseHosts) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReleaseHosts") + body.Key("Action").String("RegisterTransitGatewayMulticastGroupSources") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReleaseHostsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRegisterTransitGatewayMulticastGroupSourcesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44258,14 +44608,14 @@ func (m *awsEc2query_serializeOpReleaseHosts) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReleaseIpamPoolAllocation struct { +type awsEc2query_serializeOpRejectCapacityReservationBillingOwnership struct { } -func (*awsEc2query_serializeOpReleaseIpamPoolAllocation) ID() string { +func (*awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReleaseIpamPoolAllocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectCapacityReservationBillingOwnership) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44277,7 +44627,7 @@ func (m *awsEc2query_serializeOpReleaseIpamPoolAllocation) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReleaseIpamPoolAllocationInput) + input, ok := in.Parameters.(*RejectCapacityReservationBillingOwnershipInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44302,10 +44652,10 @@ func (m *awsEc2query_serializeOpReleaseIpamPoolAllocation) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReleaseIpamPoolAllocation") + body.Key("Action").String("RejectCapacityReservationBillingOwnership") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReleaseIpamPoolAllocationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectCapacityReservationBillingOwnershipInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44328,14 +44678,14 @@ func (m *awsEc2query_serializeOpReleaseIpamPoolAllocation) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceIamInstanceProfileAssociation struct { +type awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations struct { } -func (*awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) ID() string { +func (*awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) ID() string { 
return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectTransitGatewayMulticastDomainAssociations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44347,7 +44697,7 @@ func (m *awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) HandleSeri return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceIamInstanceProfileAssociationInput) + input, ok := in.Parameters.(*RejectTransitGatewayMulticastDomainAssociationsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44372,10 +44722,10 @@ func (m *awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) HandleSeri bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceIamInstanceProfileAssociation") + body.Key("Action").String("RejectTransitGatewayMulticastDomainAssociations") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceIamInstanceProfileAssociationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectTransitGatewayMulticastDomainAssociationsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44398,14 +44748,14 @@ func (m *awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) HandleSeri return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings struct { +type awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment struct { } -func (*awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) ID() string { +func (*awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectTransitGatewayPeeringAttachment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44417,7 +44767,7 @@ func (m *awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) Han return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceImageCriteriaInAllowedImagesSettingsInput) + input, ok := in.Parameters.(*RejectTransitGatewayPeeringAttachmentInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44442,10 +44792,10 @@ func (m *awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) Han bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceImageCriteriaInAllowedImagesSettings") 
+ body.Key("Action").String("RejectTransitGatewayPeeringAttachment") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceImageCriteriaInAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectTransitGatewayPeeringAttachmentInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44468,14 +44818,14 @@ func (m *awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) Han return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceNetworkAclAssociation struct { +type awsEc2query_serializeOpRejectTransitGatewayVpcAttachment struct { } -func (*awsEc2query_serializeOpReplaceNetworkAclAssociation) ID() string { +func (*awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceNetworkAclAssociation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectTransitGatewayVpcAttachment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44487,7 +44837,7 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclAssociation) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceNetworkAclAssociationInput) + input, ok := in.Parameters.(*RejectTransitGatewayVpcAttachmentInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44512,10 +44862,10 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclAssociation) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceNetworkAclAssociation") + body.Key("Action").String("RejectTransitGatewayVpcAttachment") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceNetworkAclAssociationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectTransitGatewayVpcAttachmentInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44538,14 +44888,14 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclAssociation) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceNetworkAclEntry struct { +type awsEc2query_serializeOpRejectVpcEndpointConnections struct { } -func (*awsEc2query_serializeOpReplaceNetworkAclEntry) ID() string { +func (*awsEc2query_serializeOpRejectVpcEndpointConnections) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceNetworkAclEntry) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectVpcEndpointConnections) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44557,7 +44907,7 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclEntry) HandleSerialize(ctx 
cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceNetworkAclEntryInput) + input, ok := in.Parameters.(*RejectVpcEndpointConnectionsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44582,10 +44932,10 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclEntry) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceNetworkAclEntry") + body.Key("Action").String("RejectVpcEndpointConnections") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceNetworkAclEntryInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectVpcEndpointConnectionsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44608,14 +44958,14 @@ func (m *awsEc2query_serializeOpReplaceNetworkAclEntry) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceRoute struct { +type awsEc2query_serializeOpRejectVpcPeeringConnection struct { } -func (*awsEc2query_serializeOpReplaceRoute) ID() string { +func (*awsEc2query_serializeOpRejectVpcPeeringConnection) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRejectVpcPeeringConnection) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44627,7 +44977,7 @@ func (m *awsEc2query_serializeOpReplaceRoute) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceRouteInput) + input, ok := in.Parameters.(*RejectVpcPeeringConnectionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44652,10 +45002,10 @@ func (m *awsEc2query_serializeOpReplaceRoute) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceRoute") + body.Key("Action").String("RejectVpcPeeringConnection") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceRouteInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRejectVpcPeeringConnectionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44678,14 +45028,14 @@ func (m *awsEc2query_serializeOpReplaceRoute) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceRouteTableAssociation struct { +type awsEc2query_serializeOpReleaseAddress struct { } -func (*awsEc2query_serializeOpReplaceRouteTableAssociation) ID() string { +func (*awsEc2query_serializeOpReleaseAddress) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceRouteTableAssociation) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReleaseAddress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44697,7 +45047,7 @@ func (m *awsEc2query_serializeOpReplaceRouteTableAssociation) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceRouteTableAssociationInput) + input, ok := in.Parameters.(*ReleaseAddressInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44722,10 +45072,10 @@ func (m *awsEc2query_serializeOpReplaceRouteTableAssociation) HandleSerialize(ct bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceRouteTableAssociation") + body.Key("Action").String("ReleaseAddress") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceRouteTableAssociationInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReleaseAddressInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44748,14 +45098,14 @@ func (m *awsEc2query_serializeOpReplaceRouteTableAssociation) HandleSerialize(ct return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceTransitGatewayRoute struct { +type awsEc2query_serializeOpReleaseHosts struct { } -func (*awsEc2query_serializeOpReplaceTransitGatewayRoute) ID() string { +func (*awsEc2query_serializeOpReleaseHosts) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceTransitGatewayRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReleaseHosts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44767,7 +45117,7 @@ func (m *awsEc2query_serializeOpReplaceTransitGatewayRoute) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceTransitGatewayRouteInput) + input, ok := in.Parameters.(*ReleaseHostsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44792,10 +45142,10 @@ func (m *awsEc2query_serializeOpReplaceTransitGatewayRoute) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceTransitGatewayRoute") + body.Key("Action").String("ReleaseHosts") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceTransitGatewayRouteInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReleaseHostsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44818,14 +45168,14 @@ func (m *awsEc2query_serializeOpReplaceTransitGatewayRoute) HandleSerialize(ctx return 
next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReplaceVpnTunnel struct { +type awsEc2query_serializeOpReleaseIpamPoolAllocation struct { } -func (*awsEc2query_serializeOpReplaceVpnTunnel) ID() string { +func (*awsEc2query_serializeOpReleaseIpamPoolAllocation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReplaceVpnTunnel) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReleaseIpamPoolAllocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44837,7 +45187,7 @@ func (m *awsEc2query_serializeOpReplaceVpnTunnel) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReplaceVpnTunnelInput) + input, ok := in.Parameters.(*ReleaseIpamPoolAllocationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44862,10 +45212,10 @@ func (m *awsEc2query_serializeOpReplaceVpnTunnel) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ReplaceVpnTunnel") + body.Key("Action").String("ReleaseIpamPoolAllocation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReplaceVpnTunnelInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReleaseIpamPoolAllocationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44888,14 +45238,14 @@ func (m *awsEc2query_serializeOpReplaceVpnTunnel) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpReportInstanceStatus struct { +type awsEc2query_serializeOpReplaceIamInstanceProfileAssociation struct { } -func (*awsEc2query_serializeOpReportInstanceStatus) ID() string { +func (*awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpReportInstanceStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceIamInstanceProfileAssociation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44907,7 +45257,7 @@ func (m *awsEc2query_serializeOpReportInstanceStatus) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ReportInstanceStatusInput) + input, ok := in.Parameters.(*ReplaceIamInstanceProfileAssociationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -44932,10 +45282,10 @@ func (m *awsEc2query_serializeOpReportInstanceStatus) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("ReportInstanceStatus") + body.Key("Action").String("ReplaceIamInstanceProfileAssociation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentReportInstanceStatusInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceIamInstanceProfileAssociationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -44958,14 +45308,14 @@ func (m *awsEc2query_serializeOpReportInstanceStatus) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRequestSpotFleet struct { +type awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings struct { } -func (*awsEc2query_serializeOpRequestSpotFleet) ID() string { +func (*awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRequestSpotFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceImageCriteriaInAllowedImagesSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -44977,7 +45327,7 @@ func (m *awsEc2query_serializeOpRequestSpotFleet) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RequestSpotFleetInput) + input, ok := in.Parameters.(*ReplaceImageCriteriaInAllowedImagesSettingsInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45002,10 +45352,10 @@ func (m *awsEc2query_serializeOpRequestSpotFleet) HandleSerialize(ctx context.Co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RequestSpotFleet") + body.Key("Action").String("ReplaceImageCriteriaInAllowedImagesSettings") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRequestSpotFleetInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceImageCriteriaInAllowedImagesSettingsInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45028,14 +45378,14 @@ func (m *awsEc2query_serializeOpRequestSpotFleet) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRequestSpotInstances struct { +type awsEc2query_serializeOpReplaceNetworkAclAssociation struct { } -func (*awsEc2query_serializeOpRequestSpotInstances) ID() string { +func (*awsEc2query_serializeOpReplaceNetworkAclAssociation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRequestSpotInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceNetworkAclAssociation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45047,7 +45397,7 @@ func (m *awsEc2query_serializeOpRequestSpotInstances) 
HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RequestSpotInstancesInput) + input, ok := in.Parameters.(*ReplaceNetworkAclAssociationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45072,10 +45422,10 @@ func (m *awsEc2query_serializeOpRequestSpotInstances) HandleSerialize(ctx contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RequestSpotInstances") + body.Key("Action").String("ReplaceNetworkAclAssociation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRequestSpotInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceNetworkAclAssociationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45098,14 +45448,14 @@ func (m *awsEc2query_serializeOpRequestSpotInstances) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetAddressAttribute struct { +type awsEc2query_serializeOpReplaceNetworkAclEntry struct { } -func (*awsEc2query_serializeOpResetAddressAttribute) ID() string { +func (*awsEc2query_serializeOpReplaceNetworkAclEntry) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetAddressAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceNetworkAclEntry) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45117,7 +45467,7 @@ func (m *awsEc2query_serializeOpResetAddressAttribute) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetAddressAttributeInput) + input, ok := in.Parameters.(*ReplaceNetworkAclEntryInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45142,10 +45492,10 @@ func (m *awsEc2query_serializeOpResetAddressAttribute) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetAddressAttribute") + body.Key("Action").String("ReplaceNetworkAclEntry") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetAddressAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceNetworkAclEntryInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45168,14 +45518,14 @@ func (m *awsEc2query_serializeOpResetAddressAttribute) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetEbsDefaultKmsKeyId struct { +type awsEc2query_serializeOpReplaceRoute struct { } -func (*awsEc2query_serializeOpResetEbsDefaultKmsKeyId) ID() string { +func (*awsEc2query_serializeOpReplaceRoute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetEbsDefaultKmsKeyId) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45187,7 +45537,7 @@ func (m *awsEc2query_serializeOpResetEbsDefaultKmsKeyId) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetEbsDefaultKmsKeyIdInput) + input, ok := in.Parameters.(*ReplaceRouteInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45212,10 +45562,10 @@ func (m *awsEc2query_serializeOpResetEbsDefaultKmsKeyId) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetEbsDefaultKmsKeyId") + body.Key("Action").String("ReplaceRoute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceRouteInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45238,14 +45588,14 @@ func (m *awsEc2query_serializeOpResetEbsDefaultKmsKeyId) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetFpgaImageAttribute struct { +type awsEc2query_serializeOpReplaceRouteTableAssociation struct { } -func (*awsEc2query_serializeOpResetFpgaImageAttribute) ID() string { +func (*awsEc2query_serializeOpReplaceRouteTableAssociation) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetFpgaImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceRouteTableAssociation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45257,7 +45607,7 @@ func (m *awsEc2query_serializeOpResetFpgaImageAttribute) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetFpgaImageAttributeInput) + input, ok := in.Parameters.(*ReplaceRouteTableAssociationInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45282,10 +45632,10 @@ func (m *awsEc2query_serializeOpResetFpgaImageAttribute) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetFpgaImageAttribute") + body.Key("Action").String("ReplaceRouteTableAssociation") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetFpgaImageAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceRouteTableAssociationInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45308,14 +45658,14 @@ func (m 
*awsEc2query_serializeOpResetFpgaImageAttribute) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetImageAttribute struct { +type awsEc2query_serializeOpReplaceTransitGatewayRoute struct { } -func (*awsEc2query_serializeOpResetImageAttribute) ID() string { +func (*awsEc2query_serializeOpReplaceTransitGatewayRoute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceTransitGatewayRoute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45327,7 +45677,7 @@ func (m *awsEc2query_serializeOpResetImageAttribute) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetImageAttributeInput) + input, ok := in.Parameters.(*ReplaceTransitGatewayRouteInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45352,10 +45702,10 @@ func (m *awsEc2query_serializeOpResetImageAttribute) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetImageAttribute") + body.Key("Action").String("ReplaceTransitGatewayRoute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetImageAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceTransitGatewayRouteInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45378,14 +45728,14 @@ func (m *awsEc2query_serializeOpResetImageAttribute) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetInstanceAttribute struct { +type awsEc2query_serializeOpReplaceVpnTunnel struct { } -func (*awsEc2query_serializeOpResetInstanceAttribute) ID() string { +func (*awsEc2query_serializeOpReplaceVpnTunnel) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReplaceVpnTunnel) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45397,7 +45747,7 @@ func (m *awsEc2query_serializeOpResetInstanceAttribute) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetInstanceAttributeInput) + input, ok := in.Parameters.(*ReplaceVpnTunnelInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45422,10 +45772,10 @@ func (m *awsEc2query_serializeOpResetInstanceAttribute) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - 
body.Key("Action").String("ResetInstanceAttribute") + body.Key("Action").String("ReplaceVpnTunnel") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetInstanceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReplaceVpnTunnelInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45448,14 +45798,14 @@ func (m *awsEc2query_serializeOpResetInstanceAttribute) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetNetworkInterfaceAttribute struct { +type awsEc2query_serializeOpReportInstanceStatus struct { } -func (*awsEc2query_serializeOpResetNetworkInterfaceAttribute) ID() string { +func (*awsEc2query_serializeOpReportInstanceStatus) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpReportInstanceStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45467,7 +45817,7 @@ func (m *awsEc2query_serializeOpResetNetworkInterfaceAttribute) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*ResetNetworkInterfaceAttributeInput) + input, ok := in.Parameters.(*ReportInstanceStatusInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45492,10 +45842,10 @@ func (m *awsEc2query_serializeOpResetNetworkInterfaceAttribute) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetNetworkInterfaceAttribute") + body.Key("Action").String("ReportInstanceStatus") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentReportInstanceStatusInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45518,14 +45868,14 @@ func (m *awsEc2query_serializeOpResetNetworkInterfaceAttribute) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpResetSnapshotAttribute struct { +type awsEc2query_serializeOpRequestSpotFleet struct { } -func (*awsEc2query_serializeOpResetSnapshotAttribute) ID() string { +func (*awsEc2query_serializeOpRequestSpotFleet) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpResetSnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRequestSpotFleet) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45537,7 +45887,7 @@ func (m *awsEc2query_serializeOpResetSnapshotAttribute) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - 
input, ok := in.Parameters.(*ResetSnapshotAttributeInput) + input, ok := in.Parameters.(*RequestSpotFleetInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45562,10 +45912,10 @@ func (m *awsEc2query_serializeOpResetSnapshotAttribute) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("ResetSnapshotAttribute") + body.Key("Action").String("RequestSpotFleet") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentResetSnapshotAttributeInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRequestSpotFleetInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45588,14 +45938,14 @@ func (m *awsEc2query_serializeOpResetSnapshotAttribute) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRestoreAddressToClassic struct { +type awsEc2query_serializeOpRequestSpotInstances struct { } -func (*awsEc2query_serializeOpRestoreAddressToClassic) ID() string { +func (*awsEc2query_serializeOpRequestSpotInstances) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRestoreAddressToClassic) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRequestSpotInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45607,7 +45957,7 @@ func (m *awsEc2query_serializeOpRestoreAddressToClassic) HandleSerialize(ctx con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RestoreAddressToClassicInput) + input, ok := in.Parameters.(*RequestSpotInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45632,10 +45982,10 @@ func (m *awsEc2query_serializeOpRestoreAddressToClassic) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RestoreAddressToClassic") + body.Key("Action").String("RequestSpotInstances") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRestoreAddressToClassicInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRequestSpotInstancesInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45658,14 +46008,14 @@ func (m *awsEc2query_serializeOpRestoreAddressToClassic) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRestoreImageFromRecycleBin struct { +type awsEc2query_serializeOpResetAddressAttribute struct { } -func (*awsEc2query_serializeOpRestoreImageFromRecycleBin) ID() string { +func (*awsEc2query_serializeOpResetAddressAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRestoreImageFromRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetAddressAttribute) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45677,7 +46027,7 @@ func (m *awsEc2query_serializeOpRestoreImageFromRecycleBin) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RestoreImageFromRecycleBinInput) + input, ok := in.Parameters.(*ResetAddressAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45702,10 +46052,10 @@ func (m *awsEc2query_serializeOpRestoreImageFromRecycleBin) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RestoreImageFromRecycleBin") + body.Key("Action").String("ResetAddressAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRestoreImageFromRecycleBinInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetAddressAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45728,14 +46078,14 @@ func (m *awsEc2query_serializeOpRestoreImageFromRecycleBin) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRestoreManagedPrefixListVersion struct { +type awsEc2query_serializeOpResetEbsDefaultKmsKeyId struct { } -func (*awsEc2query_serializeOpRestoreManagedPrefixListVersion) ID() string { +func (*awsEc2query_serializeOpResetEbsDefaultKmsKeyId) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRestoreManagedPrefixListVersion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetEbsDefaultKmsKeyId) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45747,7 +46097,7 @@ func (m *awsEc2query_serializeOpRestoreManagedPrefixListVersion) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RestoreManagedPrefixListVersionInput) + input, ok := in.Parameters.(*ResetEbsDefaultKmsKeyIdInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45772,10 +46122,10 @@ func (m *awsEc2query_serializeOpRestoreManagedPrefixListVersion) HandleSerialize bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RestoreManagedPrefixListVersion") + body.Key("Action").String("ResetEbsDefaultKmsKeyId") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRestoreManagedPrefixListVersionInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetEbsDefaultKmsKeyIdInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45798,14 +46148,14 @@ func (m *awsEc2query_serializeOpRestoreManagedPrefixListVersion) HandleSerialize return 
next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRestoreSnapshotFromRecycleBin struct { +type awsEc2query_serializeOpResetFpgaImageAttribute struct { } -func (*awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) ID() string { +func (*awsEc2query_serializeOpResetFpgaImageAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetFpgaImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45817,7 +46167,7 @@ func (m *awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) HandleSerialize(c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RestoreSnapshotFromRecycleBinInput) + input, ok := in.Parameters.(*ResetFpgaImageAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45842,10 +46192,10 @@ func (m *awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) HandleSerialize(c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RestoreSnapshotFromRecycleBin") + body.Key("Action").String("ResetFpgaImageAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRestoreSnapshotFromRecycleBinInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetFpgaImageAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45868,14 +46218,14 @@ func (m *awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) HandleSerialize(c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRestoreSnapshotTier struct { +type awsEc2query_serializeOpResetImageAttribute struct { } -func (*awsEc2query_serializeOpRestoreSnapshotTier) ID() string { +func (*awsEc2query_serializeOpResetImageAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRestoreSnapshotTier) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetImageAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45887,7 +46237,7 @@ func (m *awsEc2query_serializeOpRestoreSnapshotTier) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RestoreSnapshotTierInput) + input, ok := in.Parameters.(*ResetImageAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45912,10 +46262,10 @@ func (m *awsEc2query_serializeOpRestoreSnapshotTier) HandleSerialize(ctx context bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RestoreSnapshotTier") + 
body.Key("Action").String("ResetImageAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRestoreSnapshotTierInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetImageAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -45938,14 +46288,14 @@ func (m *awsEc2query_serializeOpRestoreSnapshotTier) HandleSerialize(ctx context return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRevokeClientVpnIngress struct { +type awsEc2query_serializeOpResetInstanceAttribute struct { } -func (*awsEc2query_serializeOpRevokeClientVpnIngress) ID() string { +func (*awsEc2query_serializeOpResetInstanceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRevokeClientVpnIngress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetInstanceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -45957,7 +46307,7 @@ func (m *awsEc2query_serializeOpRevokeClientVpnIngress) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RevokeClientVpnIngressInput) + input, ok := in.Parameters.(*ResetInstanceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -45982,10 +46332,10 @@ func (m *awsEc2query_serializeOpRevokeClientVpnIngress) HandleSerialize(ctx cont bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RevokeClientVpnIngress") + body.Key("Action").String("ResetInstanceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRevokeClientVpnIngressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetInstanceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46008,14 +46358,14 @@ func (m *awsEc2query_serializeOpRevokeClientVpnIngress) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRevokeSecurityGroupEgress struct { +type awsEc2query_serializeOpResetNetworkInterfaceAttribute struct { } -func (*awsEc2query_serializeOpRevokeSecurityGroupEgress) ID() string { +func (*awsEc2query_serializeOpResetNetworkInterfaceAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRevokeSecurityGroupEgress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetNetworkInterfaceAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46027,7 +46377,7 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupEgress) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := 
in.Parameters.(*RevokeSecurityGroupEgressInput) + input, ok := in.Parameters.(*ResetNetworkInterfaceAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46052,10 +46402,10 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupEgress) HandleSerialize(ctx c bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RevokeSecurityGroupEgress") + body.Key("Action").String("ResetNetworkInterfaceAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRevokeSecurityGroupEgressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetNetworkInterfaceAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46078,14 +46428,14 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupEgress) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRevokeSecurityGroupIngress struct { +type awsEc2query_serializeOpResetSnapshotAttribute struct { } -func (*awsEc2query_serializeOpRevokeSecurityGroupIngress) ID() string { +func (*awsEc2query_serializeOpResetSnapshotAttribute) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRevokeSecurityGroupIngress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpResetSnapshotAttribute) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46097,7 +46447,7 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupIngress) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RevokeSecurityGroupIngressInput) + input, ok := in.Parameters.(*ResetSnapshotAttributeInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46122,10 +46472,10 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupIngress) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RevokeSecurityGroupIngress") + body.Key("Action").String("ResetSnapshotAttribute") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRevokeSecurityGroupIngressInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentResetSnapshotAttributeInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46148,14 +46498,14 @@ func (m *awsEc2query_serializeOpRevokeSecurityGroupIngress) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRunInstances struct { +type awsEc2query_serializeOpRestoreAddressToClassic struct { } -func (*awsEc2query_serializeOpRunInstances) ID() string { +func (*awsEc2query_serializeOpRestoreAddressToClassic) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRunInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m 
*awsEc2query_serializeOpRestoreAddressToClassic) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46167,7 +46517,7 @@ func (m *awsEc2query_serializeOpRunInstances) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RunInstancesInput) + input, ok := in.Parameters.(*RestoreAddressToClassicInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46192,10 +46542,10 @@ func (m *awsEc2query_serializeOpRunInstances) HandleSerialize(ctx context.Contex bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RunInstances") + body.Key("Action").String("RestoreAddressToClassic") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRunInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRestoreAddressToClassicInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46218,14 +46568,14 @@ func (m *awsEc2query_serializeOpRunInstances) HandleSerialize(ctx context.Contex return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpRunScheduledInstances struct { +type awsEc2query_serializeOpRestoreImageFromRecycleBin struct { } -func (*awsEc2query_serializeOpRunScheduledInstances) ID() string { +func (*awsEc2query_serializeOpRestoreImageFromRecycleBin) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpRunScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRestoreImageFromRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46237,7 +46587,7 @@ func (m *awsEc2query_serializeOpRunScheduledInstances) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*RunScheduledInstancesInput) + input, ok := in.Parameters.(*RestoreImageFromRecycleBinInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46262,10 +46612,10 @@ func (m *awsEc2query_serializeOpRunScheduledInstances) HandleSerialize(ctx conte bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("RunScheduledInstances") + body.Key("Action").String("RestoreImageFromRecycleBin") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentRunScheduledInstancesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRestoreImageFromRecycleBinInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46288,14 +46638,14 @@ func (m *awsEc2query_serializeOpRunScheduledInstances) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } 
-type awsEc2query_serializeOpSearchLocalGatewayRoutes struct { +type awsEc2query_serializeOpRestoreManagedPrefixListVersion struct { } -func (*awsEc2query_serializeOpSearchLocalGatewayRoutes) ID() string { +func (*awsEc2query_serializeOpRestoreManagedPrefixListVersion) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpSearchLocalGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRestoreManagedPrefixListVersion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46307,7 +46657,7 @@ func (m *awsEc2query_serializeOpSearchLocalGatewayRoutes) HandleSerialize(ctx co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*SearchLocalGatewayRoutesInput) + input, ok := in.Parameters.(*RestoreManagedPrefixListVersionInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46332,10 +46682,10 @@ func (m *awsEc2query_serializeOpSearchLocalGatewayRoutes) HandleSerialize(ctx co bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("SearchLocalGatewayRoutes") + body.Key("Action").String("RestoreManagedPrefixListVersion") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentSearchLocalGatewayRoutesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRestoreManagedPrefixListVersionInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46358,14 +46708,14 @@ func (m *awsEc2query_serializeOpSearchLocalGatewayRoutes) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpSearchTransitGatewayMulticastGroups struct { +type awsEc2query_serializeOpRestoreSnapshotFromRecycleBin struct { } -func (*awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) ID() string { +func (*awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRestoreSnapshotFromRecycleBin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46377,7 +46727,7 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) HandleSeria return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*SearchTransitGatewayMulticastGroupsInput) + input, ok := in.Parameters.(*RestoreSnapshotFromRecycleBinInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46402,10 +46752,10 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) HandleSeria bodyWriter := bytes.NewBuffer(nil) bodyEncoder := 
query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("SearchTransitGatewayMulticastGroups") + body.Key("Action").String("RestoreSnapshotFromRecycleBin") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentSearchTransitGatewayMulticastGroupsInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRestoreSnapshotFromRecycleBinInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46428,14 +46778,14 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) HandleSeria return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpSearchTransitGatewayRoutes struct { +type awsEc2query_serializeOpRestoreSnapshotTier struct { } -func (*awsEc2query_serializeOpSearchTransitGatewayRoutes) ID() string { +func (*awsEc2query_serializeOpRestoreSnapshotTier) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpSearchTransitGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRestoreSnapshotTier) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46447,7 +46797,7 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayRoutes) HandleSerialize(ctx return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*SearchTransitGatewayRoutesInput) + input, ok := in.Parameters.(*RestoreSnapshotTierInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46472,10 +46822,10 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayRoutes) HandleSerialize(ctx bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("SearchTransitGatewayRoutes") + body.Key("Action").String("RestoreSnapshotTier") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentSearchTransitGatewayRoutesInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRestoreSnapshotTierInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46498,14 +46848,14 @@ func (m *awsEc2query_serializeOpSearchTransitGatewayRoutes) HandleSerialize(ctx return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpSendDiagnosticInterrupt struct { +type awsEc2query_serializeOpRevokeClientVpnIngress struct { } -func (*awsEc2query_serializeOpSendDiagnosticInterrupt) ID() string { +func (*awsEc2query_serializeOpRevokeClientVpnIngress) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpSendDiagnosticInterrupt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRevokeClientVpnIngress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46517,7 +46867,7 @@ func (m *awsEc2query_serializeOpSendDiagnosticInterrupt) HandleSerialize(ctx con return 
out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*SendDiagnosticInterruptInput) + input, ok := in.Parameters.(*RevokeClientVpnIngressInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46542,10 +46892,10 @@ func (m *awsEc2query_serializeOpSendDiagnosticInterrupt) HandleSerialize(ctx con bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("SendDiagnosticInterrupt") + body.Key("Action").String("RevokeClientVpnIngress") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentSendDiagnosticInterruptInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRevokeClientVpnIngressInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46568,14 +46918,14 @@ func (m *awsEc2query_serializeOpSendDiagnosticInterrupt) HandleSerialize(ctx con return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpStartDeclarativePoliciesReport struct { +type awsEc2query_serializeOpRevokeSecurityGroupEgress struct { } -func (*awsEc2query_serializeOpStartDeclarativePoliciesReport) ID() string { +func (*awsEc2query_serializeOpRevokeSecurityGroupEgress) ID() string { return "OperationSerializer" } -func (m *awsEc2query_serializeOpStartDeclarativePoliciesReport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRevokeSecurityGroupEgress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46587,7 +46937,7 @@ func (m *awsEc2query_serializeOpStartDeclarativePoliciesReport) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*StartDeclarativePoliciesReportInput) + input, ok := in.Parameters.(*RevokeSecurityGroupEgressInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -46612,10 +46962,10 @@ func (m *awsEc2query_serializeOpStartDeclarativePoliciesReport) HandleSerialize( bodyWriter := bytes.NewBuffer(nil) bodyEncoder := query.NewEncoder(bodyWriter) body := bodyEncoder.Object() - body.Key("Action").String("StartDeclarativePoliciesReport") + body.Key("Action").String("RevokeSecurityGroupEgress") body.Key("Version").String("2016-11-15") - if err := awsEc2query_serializeOpDocumentStartDeclarativePoliciesReportInput(input, bodyEncoder.Value); err != nil { + if err := awsEc2query_serializeOpDocumentRevokeSecurityGroupEgressInput(input, bodyEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -46638,14 +46988,14 @@ func (m *awsEc2query_serializeOpStartDeclarativePoliciesReport) HandleSerialize( return next.HandleSerialize(ctx, in) } -type awsEc2query_serializeOpStartInstances struct { +type awsEc2query_serializeOpRevokeSecurityGroupIngress struct { } -func (*awsEc2query_serializeOpStartInstances) ID() string { +func (*awsEc2query_serializeOpRevokeSecurityGroupIngress) ID() string { return "OperationSerializer" } -func (m 
*awsEc2query_serializeOpStartInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsEc2query_serializeOpRevokeSecurityGroupIngress) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -46657,7 +47007,567 @@ func (m *awsEc2query_serializeOpStartInstances) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*StartInstancesInput) + input, ok := in.Parameters.(*RevokeSecurityGroupIngressInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("RevokeSecurityGroupIngress") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentRevokeSecurityGroupIngressInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpRunInstances struct { +} + +func (*awsEc2query_serializeOpRunInstances) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpRunInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RunInstancesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("RunInstances") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentRunInstancesInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpRunScheduledInstances struct { +} + +func (*awsEc2query_serializeOpRunScheduledInstances) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpRunScheduledInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RunScheduledInstancesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("RunScheduledInstances") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentRunScheduledInstancesInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpSearchLocalGatewayRoutes struct { +} + +func (*awsEc2query_serializeOpSearchLocalGatewayRoutes) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpSearchLocalGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SearchLocalGatewayRoutesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("SearchLocalGatewayRoutes") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentSearchLocalGatewayRoutesInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpSearchTransitGatewayMulticastGroups struct { +} + +func (*awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpSearchTransitGatewayMulticastGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := 
in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SearchTransitGatewayMulticastGroupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("SearchTransitGatewayMulticastGroups") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentSearchTransitGatewayMulticastGroupsInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpSearchTransitGatewayRoutes struct { +} + +func (*awsEc2query_serializeOpSearchTransitGatewayRoutes) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpSearchTransitGatewayRoutes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SearchTransitGatewayRoutesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("SearchTransitGatewayRoutes") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentSearchTransitGatewayRoutesInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpSendDiagnosticInterrupt struct { +} + +func (*awsEc2query_serializeOpSendDiagnosticInterrupt) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpSendDiagnosticInterrupt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SendDiagnosticInterruptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("SendDiagnosticInterrupt") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentSendDiagnosticInterruptInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type 
awsEc2query_serializeOpStartDeclarativePoliciesReport struct { +} + +func (*awsEc2query_serializeOpStartDeclarativePoliciesReport) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpStartDeclarativePoliciesReport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartDeclarativePoliciesReportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("StartDeclarativePoliciesReport") + body.Key("Version").String("2016-11-15") + + if err := awsEc2query_serializeOpDocumentStartDeclarativePoliciesReportInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsEc2query_serializeOpStartInstances struct { +} + +func (*awsEc2query_serializeOpStartInstances) ID() string { + return "OperationSerializer" +} + +func (m *awsEc2query_serializeOpStartInstances) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartInstancesInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} @@ -48282,6 +49192,19 @@ func 
awsEc2query_serializeDocumentBundleIdStringList(v []string, value query.Val return nil } +func awsEc2query_serializeDocumentCapacityBlockIds(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Item") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsEc2query_serializeDocumentCapacityReservationFleetIdSet(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -49306,6 +50229,32 @@ func awsEc2query_serializeDocumentDeregisterInstanceTagAttributeRequest(v *types return nil } +func awsEc2query_serializeDocumentDescribeImageReferencesImageIdStringList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentDescribeImageUsageReportsImageIdStringList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsEc2query_serializeDocumentDescribeInstanceTopologyGroupNameSet(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -49482,6 +50431,16 @@ func awsEc2query_serializeDocumentEbsBlockDevice(v *types.EbsBlockDevice, value object := value.Object() _ = object + if v.AvailabilityZone != nil { + objectKey := object.Key("AvailabilityZone") + objectKey.String(*v.AvailabilityZone) + } + + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.DeleteOnTermination != nil { objectKey := object.Key("DeleteOnTermination") objectKey.Boolean(*v.DeleteOnTermination) @@ -50570,6 +51529,113 @@ func awsEc2query_serializeDocumentImageProviderRequestList(v []string, value que return nil } +func awsEc2query_serializeDocumentImageUsageReportIdStringList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentImageUsageReportUserIdStringList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("UserId") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentImageUsageResourceTypeOptionRequest(v *types.ImageUsageResourceTypeOptionRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.OptionName != nil { + objectKey := object.Key("OptionName") + objectKey.String(*v.OptionName) + } + + if v.OptionValues != nil { + objectKey := object.FlatKey("OptionValue") + if err := awsEc2query_serializeDocumentImageUsageResourceTypeOptionValuesList(v.OptionValues, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeDocumentImageUsageResourceTypeOptionRequestList(v []types.ImageUsageResourceTypeOptionRequest, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + if err := awsEc2query_serializeDocumentImageUsageResourceTypeOptionRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsEc2query_serializeDocumentImageUsageResourceTypeOptionValuesList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Item") + + for i := range v { 
+ av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentImageUsageResourceTypeRequest(v *types.ImageUsageResourceTypeRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.ResourceType != nil { + objectKey := object.Key("ResourceType") + objectKey.String(*v.ResourceType) + } + + if v.ResourceTypeOptions != nil { + objectKey := object.FlatKey("ResourceTypeOption") + if err := awsEc2query_serializeDocumentImageUsageResourceTypeOptionRequestList(v.ResourceTypeOptions, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeDocumentImageUsageResourceTypeRequestList(v []types.ImageUsageResourceTypeRequest, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + if err := awsEc2query_serializeDocumentImageUsageResourceTypeRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsEc2query_serializeDocumentImportImageLicenseConfigurationRequest(v *types.ImportImageLicenseConfigurationRequest, value query.Value) error { object := value.Object() _ = object @@ -52704,6 +53770,11 @@ func awsEc2query_serializeDocumentLaunchTemplatePlacementRequest(v *types.Launch objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.GroupId != nil { objectKey := object.Key("GroupId") objectKey.String(*v.GroupId) @@ -53056,6 +54127,61 @@ func awsEc2query_serializeDocumentLocalStorageTypeSet(v []types.LocalStorageType return nil } +func awsEc2query_serializeDocumentMacModificationTaskIdList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Item") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentMacSystemIntegrityProtectionConfigurationRequest(v *types.MacSystemIntegrityProtectionConfigurationRequest, value query.Value) error { + object := value.Object() + _ = object + + if len(v.AppleInternal) > 0 { + objectKey := object.Key("AppleInternal") + objectKey.String(string(v.AppleInternal)) + } + + if len(v.BaseSystem) > 0 { + objectKey := object.Key("BaseSystem") + objectKey.String(string(v.BaseSystem)) + } + + if len(v.DebuggingRestrictions) > 0 { + objectKey := object.Key("DebuggingRestrictions") + objectKey.String(string(v.DebuggingRestrictions)) + } + + if len(v.DTraceRestrictions) > 0 { + objectKey := object.Key("DTraceRestrictions") + objectKey.String(string(v.DTraceRestrictions)) + } + + if len(v.FilesystemProtections) > 0 { + objectKey := object.Key("FilesystemProtections") + objectKey.String(string(v.FilesystemProtections)) + } + + if len(v.KextSigning) > 0 { + objectKey := object.Key("KextSigning") + objectKey.String(string(v.KextSigning)) + } + + if len(v.NvramProtections) > 0 { + objectKey := object.Key("NvramProtections") + objectKey.String(string(v.NvramProtections)) + } + + return nil +} + func awsEc2query_serializeDocumentMemoryGiBPerVCpu(v *types.MemoryGiBPerVCpu, value query.Value) error { object := value.Object() _ = object @@ -54392,6 +55518,11 @@ func awsEc2query_serializeDocumentPlacement(v *types.Placement, value query.Valu objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.GroupId != nil { objectKey := object.Key("GroupId") 
objectKey.String(*v.GroupId) @@ -55583,6 +56714,87 @@ func awsEc2query_serializeDocumentResourceStatementRequest(v *types.ResourceStat return nil } +func awsEc2query_serializeDocumentResourceTypeOption(v *types.ResourceTypeOption, value query.Value) error { + object := value.Object() + _ = object + + if len(v.OptionName) > 0 { + objectKey := object.Key("OptionName") + objectKey.String(string(v.OptionName)) + } + + if v.OptionValues != nil { + objectKey := object.FlatKey("OptionValue") + if err := awsEc2query_serializeDocumentResourceTypeOptionValuesList(v.OptionValues, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeDocumentResourceTypeOptionList(v []types.ResourceTypeOption, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + if err := awsEc2query_serializeDocumentResourceTypeOption(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsEc2query_serializeDocumentResourceTypeOptionValuesList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsEc2query_serializeDocumentResourceTypeRequest(v *types.ResourceTypeRequest, value query.Value) error { + object := value.Object() + _ = object + + if len(v.ResourceType) > 0 { + objectKey := object.Key("ResourceType") + objectKey.String(string(v.ResourceType)) + } + + if v.ResourceTypeOptions != nil { + objectKey := object.FlatKey("ResourceTypeOption") + if err := awsEc2query_serializeDocumentResourceTypeOptionList(v.ResourceTypeOptions, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeDocumentResourceTypeRequestList(v []types.ResourceTypeRequest, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("Member") + + for i := range v { + av := array.Value() + if err := awsEc2query_serializeDocumentResourceTypeRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsEc2query_serializeDocumentRestorableByStringList(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -56994,6 +58206,19 @@ func awsEc2query_serializeDocumentSubnetConfigurationsList(v []types.SubnetConfi return nil } +func awsEc2query_serializeDocumentSubnetIdList(v []string, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("AssociatedSubnetId") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsEc2query_serializeDocumentSubnetIdStringList(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -59178,6 +60403,11 @@ func awsEc2query_serializeOpDocumentAssociateRouteTableInput(v *AssociateRouteTa objectKey.String(*v.GatewayId) } + if v.PublicIpv4Pool != nil { + objectKey := object.Key("PublicIpv4Pool") + objectKey.String(*v.PublicIpv4Pool) + } + if v.RouteTableId != nil { objectKey := object.Key("RouteTableId") objectKey.String(*v.RouteTableId) @@ -60047,6 +61277,16 @@ func awsEc2query_serializeOpDocumentCopyImageInput(v *CopyImageInput, value quer objectKey.String(*v.Description) } + if v.DestinationAvailabilityZone != nil { + objectKey := object.Key("DestinationAvailabilityZone") + objectKey.String(*v.DestinationAvailabilityZone) + } + + if v.DestinationAvailabilityZoneId != nil { + objectKey := object.Key("DestinationAvailabilityZoneId") + 
objectKey.String(*v.DestinationAvailabilityZoneId) + } + if v.DestinationOutpostArn != nil { objectKey := object.Key("DestinationOutpostArn") objectKey.String(*v.DestinationOutpostArn) @@ -60111,6 +61351,11 @@ func awsEc2query_serializeOpDocumentCopySnapshotInput(v *CopySnapshotInput, valu objectKey.String(*v.Description) } + if v.DestinationAvailabilityZone != nil { + objectKey := object.Key("DestinationAvailabilityZone") + objectKey.String(*v.DestinationAvailabilityZone) + } + if v.DestinationOutpostArn != nil { objectKey := object.Key("DestinationOutpostArn") objectKey.String(*v.DestinationOutpostArn) @@ -60455,6 +61700,11 @@ func awsEc2query_serializeOpDocumentCreateClientVpnEndpointInput(v *CreateClient objectKey.Boolean(*v.DryRun) } + if len(v.EndpointIpAddressType) > 0 { + objectKey := object.Key("EndpointIpAddressType") + objectKey.String(string(v.EndpointIpAddressType)) + } + if v.SecurityGroupIds != nil { objectKey := object.FlatKey("SecurityGroupId") if err := awsEc2query_serializeDocumentClientVpnSecurityGroupIdSet(v.SecurityGroupIds, objectKey); err != nil { @@ -60489,6 +61739,11 @@ func awsEc2query_serializeOpDocumentCreateClientVpnEndpointInput(v *CreateClient } } + if len(v.TrafficIpAddressType) > 0 { + objectKey := object.Key("TrafficIpAddressType") + objectKey.String(string(v.TrafficIpAddressType)) + } + if len(v.TransportProtocol) > 0 { objectKey := object.Key("TransportProtocol") objectKey.String(string(v.TransportProtocol)) @@ -60653,6 +61908,11 @@ func awsEc2query_serializeOpDocumentCreateDefaultSubnetInput(v *CreateDefaultSub objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) @@ -60678,6 +61938,40 @@ func awsEc2query_serializeOpDocumentCreateDefaultVpcInput(v *CreateDefaultVpcInp return nil } +func awsEc2query_serializeOpDocumentCreateDelegateMacVolumeOwnershipTaskInput(v *CreateDelegateMacVolumeOwnershipTaskInput, value query.Value) error { + object := value.Object() + _ = object + + if v.ClientToken != nil { + objectKey := object.Key("ClientToken") + objectKey.String(*v.ClientToken) + } + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.InstanceId != nil { + objectKey := object.Key("InstanceId") + objectKey.String(*v.InstanceId) + } + + if v.MacCredentials != nil { + objectKey := object.Key("MacCredentials") + objectKey.String(*v.MacCredentials) + } + + if v.TagSpecifications != nil { + objectKey := object.FlatKey("TagSpecification") + if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeOpDocumentCreateDhcpOptionsInput(v *CreateDhcpOptionsInput, value query.Value) error { object := value.Object() _ = object @@ -60987,6 +62281,54 @@ func awsEc2query_serializeOpDocumentCreateImageInput(v *CreateImageInput, value objectKey.Boolean(*v.NoReboot) } + if len(v.SnapshotLocation) > 0 { + objectKey := object.Key("SnapshotLocation") + objectKey.String(string(v.SnapshotLocation)) + } + + if v.TagSpecifications != nil { + objectKey := object.FlatKey("TagSpecification") + if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeOpDocumentCreateImageUsageReportInput(v 
*CreateImageUsageReportInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccountIds != nil { + objectKey := object.FlatKey("AccountId") + if err := awsEc2query_serializeDocumentImageUsageReportUserIdStringList(v.AccountIds, objectKey); err != nil { + return err + } + } + + if v.ClientToken != nil { + objectKey := object.Key("ClientToken") + objectKey.String(*v.ClientToken) + } + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.ImageId != nil { + objectKey := object.Key("ImageId") + objectKey.String(*v.ImageId) + } + + if v.ResourceTypes != nil { + objectKey := object.FlatKey("ResourceType") + if err := awsEc2query_serializeDocumentImageUsageResourceTypeRequestList(v.ResourceTypes, objectKey); err != nil { + return err + } + } + if v.TagSpecifications != nil { objectKey := object.FlatKey("TagSpecification") if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { @@ -61011,6 +62353,11 @@ func awsEc2query_serializeOpDocumentCreateInstanceConnectEndpointInput(v *Create objectKey.Boolean(*v.DryRun) } + if len(v.IpAddressType) > 0 { + objectKey := object.Key("IpAddressType") + objectKey.String(string(v.IpAddressType)) + } + if v.PreserveClientIp != nil { objectKey := object.Key("PreserveClientIp") objectKey.Boolean(*v.PreserveClientIp) @@ -61720,6 +63067,52 @@ func awsEc2query_serializeOpDocumentCreateLocalGatewayVirtualInterfaceInput(v *C return nil } +func awsEc2query_serializeOpDocumentCreateMacSystemIntegrityProtectionModificationTaskInput(v *CreateMacSystemIntegrityProtectionModificationTaskInput, value query.Value) error { + object := value.Object() + _ = object + + if v.ClientToken != nil { + objectKey := object.Key("ClientToken") + objectKey.String(*v.ClientToken) + } + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.InstanceId != nil { + objectKey := object.Key("InstanceId") + objectKey.String(*v.InstanceId) + } + + if v.MacCredentials != nil { + objectKey := object.Key("MacCredentials") + objectKey.String(*v.MacCredentials) + } + + if v.MacSystemIntegrityProtectionConfiguration != nil { + objectKey := object.Key("MacSystemIntegrityProtectionConfiguration") + if err := awsEc2query_serializeDocumentMacSystemIntegrityProtectionConfigurationRequest(v.MacSystemIntegrityProtectionConfiguration, objectKey); err != nil { + return err + } + } + + if len(v.MacSystemIntegrityProtectionStatus) > 0 { + objectKey := object.Key("MacSystemIntegrityProtectionStatus") + objectKey.String(string(v.MacSystemIntegrityProtectionStatus)) + } + + if v.TagSpecifications != nil { + objectKey := object.FlatKey("TagSpecification") + if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeOpDocumentCreateManagedPrefixListInput(v *CreateManagedPrefixListInput, value query.Value) error { object := value.Object() _ = object @@ -62414,6 +63807,11 @@ func awsEc2query_serializeOpDocumentCreateRouteInput(v *CreateRouteInput, value objectKey.String(*v.NetworkInterfaceId) } + if v.OdbNetworkArn != nil { + objectKey := object.Key("OdbNetworkArn") + objectKey.String(*v.OdbNetworkArn) + } + if v.RouteTableId != nil { objectKey := object.Key("RouteTableId") objectKey.String(*v.RouteTableId) @@ -63747,6 +65145,11 @@ func awsEc2query_serializeOpDocumentCreateVolumeInput(v *CreateVolumeInput, valu 
objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.ClientToken != nil { objectKey := object.Key("ClientToken") objectKey.String(*v.ClientToken) @@ -64192,6 +65595,11 @@ func awsEc2query_serializeOpDocumentCreateVpnConnectionInput(v *CreateVpnConnect } } + if v.PreSharedKeyStorage != nil { + objectKey := object.Key("PreSharedKeyStorage") + objectKey.String(*v.PreSharedKeyStorage) + } + if v.TagSpecifications != nil { objectKey := object.FlatKey("TagSpecification") if err := awsEc2query_serializeDocumentTagSpecificationList(v.TagSpecifications, objectKey); err != nil { @@ -64479,6 +65887,23 @@ func awsEc2query_serializeOpDocumentDeleteFpgaImageInput(v *DeleteFpgaImageInput return nil } +func awsEc2query_serializeOpDocumentDeleteImageUsageReportInput(v *DeleteImageUsageReportInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.ReportId != nil { + objectKey := object.Key("ReportId") + objectKey.String(*v.ReportId) + } + + return nil +} + func awsEc2query_serializeOpDocumentDeleteInstanceConnectEndpointInput(v *DeleteInstanceConnectEndpointInput, value query.Value) error { object := value.Object() _ = object @@ -65885,6 +67310,11 @@ func awsEc2query_serializeOpDocumentDeregisterImageInput(v *DeregisterImageInput object := value.Object() _ = object + if v.DeleteAssociatedSnapshots != nil { + objectKey := object.Key("DeleteAssociatedSnapshots") + objectKey.Boolean(*v.DeleteAssociatedSnapshots) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) @@ -66329,6 +67759,88 @@ func awsEc2query_serializeOpDocumentDescribeCapacityBlockOfferingsInput(v *Descr objectKey.String(smithytime.FormatDateTime(*v.StartDateRange)) } + if v.UltraserverCount != nil { + objectKey := object.Key("UltraserverCount") + objectKey.Integer(*v.UltraserverCount) + } + + if v.UltraserverType != nil { + objectKey := object.Key("UltraserverType") + objectKey.String(*v.UltraserverType) + } + + return nil +} + +func awsEc2query_serializeOpDocumentDescribeCapacityBlocksInput(v *DescribeCapacityBlocksInput, value query.Value) error { + object := value.Object() + _ = object + + if v.CapacityBlockIds != nil { + objectKey := object.FlatKey("CapacityBlockId") + if err := awsEc2query_serializeDocumentCapacityBlockIds(v.CapacityBlockIds, objectKey); err != nil { + return err + } + } + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + return nil +} + +func awsEc2query_serializeOpDocumentDescribeCapacityBlockStatusInput(v *DescribeCapacityBlockStatusInput, value query.Value) error { + object := value.Object() + _ = object + + if v.CapacityBlockIds != nil { + objectKey := object.FlatKey("CapacityBlockId") + if err := awsEc2query_serializeDocumentCapacityBlockIds(v.CapacityBlockIds, objectKey); err != nil { + return err + } + } + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if 
v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + return nil } @@ -67427,6 +68939,47 @@ func awsEc2query_serializeOpDocumentDescribeImageAttributeInput(v *DescribeImage return nil } +func awsEc2query_serializeOpDocumentDescribeImageReferencesInput(v *DescribeImageReferencesInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.ImageIds != nil { + objectKey := object.FlatKey("ImageId") + if err := awsEc2query_serializeDocumentDescribeImageReferencesImageIdStringList(v.ImageIds, objectKey); err != nil { + return err + } + } + + if v.IncludeAllResourceTypes != nil { + objectKey := object.Key("IncludeAllResourceTypes") + objectKey.Boolean(*v.IncludeAllResourceTypes) + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + if v.ResourceTypes != nil { + objectKey := object.FlatKey("ResourceType") + if err := awsEc2query_serializeDocumentResourceTypeRequestList(v.ResourceTypes, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, value query.Value) error { object := value.Object() _ = object @@ -67487,6 +69040,92 @@ func awsEc2query_serializeOpDocumentDescribeImagesInput(v *DescribeImagesInput, return nil } +func awsEc2query_serializeOpDocumentDescribeImageUsageReportEntriesInput(v *DescribeImageUsageReportEntriesInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.ImageIds != nil { + objectKey := object.FlatKey("ImageId") + if err := awsEc2query_serializeDocumentDescribeImageUsageReportsImageIdStringList(v.ImageIds, objectKey); err != nil { + return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + if v.ReportIds != nil { + objectKey := object.FlatKey("ReportId") + if err := awsEc2query_serializeDocumentImageUsageReportIdStringList(v.ReportIds, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsEc2query_serializeOpDocumentDescribeImageUsageReportsInput(v *DescribeImageUsageReportsInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.ImageIds != nil { + objectKey := object.FlatKey("ImageId") + if err := awsEc2query_serializeDocumentDescribeImageUsageReportsImageIdStringList(v.ImageIds, objectKey); err != nil { 
+ return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + if v.ReportIds != nil { + objectKey := object.FlatKey("ReportId") + if err := awsEc2query_serializeDocumentImageUsageReportIdStringList(v.ReportIds, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeOpDocumentDescribeImportImageTasksInput(v *DescribeImportImageTasksInput, value query.Value) error { object := value.Object() _ = object @@ -68662,6 +70301,42 @@ func awsEc2query_serializeOpDocumentDescribeMacHostsInput(v *DescribeMacHostsInp return nil } +func awsEc2query_serializeOpDocumentDescribeMacModificationTasksInput(v *DescribeMacModificationTasksInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.Filters != nil { + objectKey := object.FlatKey("Filter") + if err := awsEc2query_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.MacModificationTaskIds != nil { + objectKey := object.FlatKey("MacModificationTaskId") + if err := awsEc2query_serializeDocumentMacModificationTaskIdList(v.MacModificationTaskIds, objectKey); err != nil { + return err + } + } + + if v.MaxResults != nil { + objectKey := object.Key("MaxResults") + objectKey.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + objectKey := object.Key("NextToken") + objectKey.String(*v.NextToken) + } + + return nil +} + func awsEc2query_serializeOpDocumentDescribeManagedPrefixListsInput(v *DescribeManagedPrefixListsInput, value query.Value) error { object := value.Object() _ = object @@ -70132,6 +71807,11 @@ func awsEc2query_serializeOpDocumentDescribeSpotPriceHistoryInput(v *DescribeSpo objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) @@ -73049,6 +74729,28 @@ func awsEc2query_serializeOpDocumentExportVerifiedAccessInstanceClientConfigurat return nil } +func awsEc2query_serializeOpDocumentGetActiveVpnTunnelStatusInput(v *GetActiveVpnTunnelStatusInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.VpnConnectionId != nil { + objectKey := object.Key("VpnConnectionId") + objectKey.String(*v.VpnConnectionId) + } + + if v.VpnTunnelOutsideIpAddress != nil { + objectKey := object.Key("VpnTunnelOutsideIpAddress") + objectKey.String(*v.VpnTunnelOutsideIpAddress) + } + + return nil +} + func awsEc2query_serializeOpDocumentGetAllowedImagesSettingsInput(v *GetAllowedImagesSettingsInput, value query.Value) error { object := value.Object() _ = object @@ -73444,6 +75146,11 @@ func awsEc2query_serializeOpDocumentGetInstanceTypesFromInstanceRequirementsInpu } } + if v.Context != nil { + objectKey := object.Key("Context") + objectKey.String(*v.Context) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) @@ -74490,6 +76197,11 @@ func awsEc2query_serializeOpDocumentGetVpnConnectionDeviceSampleConfigurationInp objectKey.String(*v.InternetKeyExchangeVersion) } + if v.SampleType != nil { + objectKey := object.Key("SampleType") + 
objectKey.String(*v.SampleType) + } + if v.VpnConnectionDeviceTypeId != nil { objectKey := object.Key("VpnConnectionDeviceTypeId") objectKey.String(*v.VpnConnectionDeviceTypeId) @@ -74796,6 +76508,11 @@ func awsEc2query_serializeOpDocumentImportVolumeInput(v *ImportVolumeInput, valu objectKey.String(*v.AvailabilityZone) } + if v.AvailabilityZoneId != nil { + objectKey := object.Key("AvailabilityZoneId") + objectKey.String(*v.AvailabilityZoneId) + } + if v.Description != nil { objectKey := object.Key("Description") objectKey.String(*v.Description) @@ -75593,6 +77310,40 @@ func awsEc2query_serializeOpDocumentModifyInstanceCapacityReservationAttributesI return nil } +func awsEc2query_serializeOpDocumentModifyInstanceConnectEndpointInput(v *ModifyInstanceConnectEndpointInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if v.InstanceConnectEndpointId != nil { + objectKey := object.Key("InstanceConnectEndpointId") + objectKey.String(*v.InstanceConnectEndpointId) + } + + if len(v.IpAddressType) > 0 { + objectKey := object.Key("IpAddressType") + objectKey.String(string(v.IpAddressType)) + } + + if v.PreserveClientIp != nil { + objectKey := object.Key("PreserveClientIp") + objectKey.Boolean(*v.PreserveClientIp) + } + + if v.SecurityGroupIds != nil { + objectKey := object.FlatKey("SecurityGroupId") + if err := awsEc2query_serializeDocumentSecurityGroupIdStringListRequest(v.SecurityGroupIds, objectKey); err != nil { + return err + } + } + + return nil +} + func awsEc2query_serializeOpDocumentModifyInstanceCpuOptionsInput(v *ModifyInstanceCpuOptionsInput, value query.Value) error { object := value.Object() _ = object @@ -75724,6 +77475,11 @@ func awsEc2query_serializeOpDocumentModifyInstanceMaintenanceOptionsInput(v *Mod objectKey.String(*v.InstanceId) } + if len(v.RebootMigration) > 0 { + objectKey := object.Key("RebootMigration") + objectKey.String(string(v.RebootMigration)) + } + return nil } @@ -76215,6 +77971,13 @@ func awsEc2query_serializeOpDocumentModifyNetworkInterfaceAttributeInput(v *Modi object := value.Object() _ = object + if v.AssociatedSubnetIds != nil { + objectKey := object.FlatKey("AssociatedSubnetId") + if err := awsEc2query_serializeDocumentSubnetIdList(v.AssociatedSubnetIds, objectKey); err != nil { + return err + } + } + if v.AssociatePublicIpAddress != nil { objectKey := object.Key("AssociatePublicIpAddress") objectKey.Boolean(*v.AssociatePublicIpAddress) @@ -76312,6 +78075,28 @@ func awsEc2query_serializeOpDocumentModifyPrivateDnsNameOptionsInput(v *ModifyPr return nil } +func awsEc2query_serializeOpDocumentModifyPublicIpDnsNameOptionsInput(v *ModifyPublicIpDnsNameOptionsInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DryRun != nil { + objectKey := object.Key("DryRun") + objectKey.Boolean(*v.DryRun) + } + + if len(v.HostnameType) > 0 { + objectKey := object.Key("HostnameType") + objectKey.String(string(v.HostnameType)) + } + + if v.NetworkInterfaceId != nil { + objectKey := object.Key("NetworkInterfaceId") + objectKey.String(*v.NetworkInterfaceId) + } + + return nil +} + func awsEc2query_serializeOpDocumentModifyReservedInstancesInput(v *ModifyReservedInstancesInput, value query.Value) error { object := value.Object() _ = object @@ -77676,6 +79461,11 @@ func awsEc2query_serializeOpDocumentModifyVpnTunnelOptionsInput(v *ModifyVpnTunn objectKey.Boolean(*v.DryRun) } + if v.PreSharedKeyStorage != nil { + objectKey := 
object.Key("PreSharedKeyStorage") + objectKey.String(*v.PreSharedKeyStorage) + } + if v.SkipTunnelReplacement != nil { objectKey := object.Key("SkipTunnelReplacement") objectKey.Boolean(*v.SkipTunnelReplacement) @@ -78685,6 +80475,11 @@ func awsEc2query_serializeOpDocumentReplaceRouteInput(v *ReplaceRouteInput, valu objectKey.String(*v.NetworkInterfaceId) } + if v.OdbNetworkArn != nil { + objectKey := object.Key("OdbNetworkArn") + objectKey.String(*v.OdbNetworkArn) + } + if v.RouteTableId != nil { objectKey := object.Key("RouteTableId") objectKey.String(*v.RouteTableId) @@ -79936,6 +81731,11 @@ func awsEc2query_serializeOpDocumentStopInstancesInput(v *StopInstancesInput, va } } + if v.SkipOsShutdown != nil { + objectKey := object.Key("SkipOsShutdown") + objectKey.Boolean(*v.SkipOsShutdown) + } + return nil } @@ -79975,6 +81775,11 @@ func awsEc2query_serializeOpDocumentTerminateInstancesInput(v *TerminateInstance objectKey.Boolean(*v.DryRun) } + if v.Force != nil { + objectKey := object.Key("Force") + objectKey.Boolean(*v.Force) + } + if v.InstanceIds != nil { objectKey := object.FlatKey("InstanceId") if err := awsEc2query_serializeDocumentInstanceIdStringList(v.InstanceIds, objectKey); err != nil { @@ -79982,6 +81787,11 @@ func awsEc2query_serializeOpDocumentTerminateInstancesInput(v *TerminateInstance } } + if v.SkipOsShutdown != nil { + objectKey := object.Key("SkipOsShutdown") + objectKey.Boolean(*v.SkipOsShutdown) + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go index 97065c959..a89a78d90 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go @@ -988,6 +988,59 @@ func (CapacityBlockExtensionStatus) Values() []CapacityBlockExtensionStatus { } } +type CapacityBlockInterconnectStatus string + +// Enum values for CapacityBlockInterconnectStatus +const ( + CapacityBlockInterconnectStatusOk CapacityBlockInterconnectStatus = "ok" + CapacityBlockInterconnectStatusImpaired CapacityBlockInterconnectStatus = "impaired" + CapacityBlockInterconnectStatusInsufficientData CapacityBlockInterconnectStatus = "insufficient-data" +) + +// Values returns all known values for CapacityBlockInterconnectStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityBlockInterconnectStatus) Values() []CapacityBlockInterconnectStatus { + return []CapacityBlockInterconnectStatus{ + "ok", + "impaired", + "insufficient-data", + } +} + +type CapacityBlockResourceState string + +// Enum values for CapacityBlockResourceState +const ( + CapacityBlockResourceStateActive CapacityBlockResourceState = "active" + CapacityBlockResourceStateExpired CapacityBlockResourceState = "expired" + CapacityBlockResourceStateUnavailable CapacityBlockResourceState = "unavailable" + CapacityBlockResourceStateCancelled CapacityBlockResourceState = "cancelled" + CapacityBlockResourceStateFailed CapacityBlockResourceState = "failed" + CapacityBlockResourceStateScheduled CapacityBlockResourceState = "scheduled" + CapacityBlockResourceStatePaymentPending CapacityBlockResourceState = "payment-pending" + CapacityBlockResourceStatePaymentFailed CapacityBlockResourceState = "payment-failed" +) + +// Values returns all known values for CapacityBlockResourceState. 
Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityBlockResourceState) Values() []CapacityBlockResourceState { + return []CapacityBlockResourceState{ + "active", + "expired", + "unavailable", + "cancelled", + "failed", + "scheduled", + "payment-pending", + "payment-failed", + } +} + type CapacityReservationBillingRequestStatus string // Enum values for CapacityReservationBillingRequestStatus @@ -1159,6 +1212,7 @@ const ( CapacityReservationStateAssessing CapacityReservationState = "assessing" CapacityReservationStateDelayed CapacityReservationState = "delayed" CapacityReservationStateUnsupported CapacityReservationState = "unsupported" + CapacityReservationStateUnavailable CapacityReservationState = "unavailable" ) // Values returns all known values for CapacityReservationState. Note that this @@ -1178,6 +1232,7 @@ func (CapacityReservationState) Values() []CapacityReservationState { "assessing", "delayed", "unsupported", + "unavailable", } } @@ -1989,6 +2044,9 @@ const ( Ec2InstanceConnectEndpointStateDeleteInProgress Ec2InstanceConnectEndpointState = "delete-in-progress" Ec2InstanceConnectEndpointStateDeleteComplete Ec2InstanceConnectEndpointState = "delete-complete" Ec2InstanceConnectEndpointStateDeleteFailed Ec2InstanceConnectEndpointState = "delete-failed" + Ec2InstanceConnectEndpointStateUpdateInProgress Ec2InstanceConnectEndpointState = "update-in-progress" + Ec2InstanceConnectEndpointStateUpdateComplete Ec2InstanceConnectEndpointState = "update-complete" + Ec2InstanceConnectEndpointStateUpdateFailed Ec2InstanceConnectEndpointState = "update-failed" ) // Values returns all known values for Ec2InstanceConnectEndpointState. Note that @@ -2004,6 +2062,9 @@ func (Ec2InstanceConnectEndpointState) Values() []Ec2InstanceConnectEndpointStat "delete-in-progress", "delete-complete", "delete-failed", + "update-in-progress", + "update-complete", + "update-failed", } } @@ -2121,6 +2182,27 @@ func (EndDateType) Values() []EndDateType { } } +type EndpointIpAddressType string + +// Enum values for EndpointIpAddressType +const ( + EndpointIpAddressTypeIpv4 EndpointIpAddressType = "ipv4" + EndpointIpAddressTypeIpv6 EndpointIpAddressType = "ipv6" + EndpointIpAddressTypeDualStack EndpointIpAddressType = "dual-stack" +) + +// Values returns all known values for EndpointIpAddressType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EndpointIpAddressType) Values() []EndpointIpAddressType { + return []EndpointIpAddressType{ + "ipv4", + "ipv6", + "dual-stack", + } +} + type EphemeralNvmeSupport string // Enum values for EphemeralNvmeSupport @@ -2917,6 +2999,50 @@ func (ImageBlockPublicAccessEnabledState) Values() []ImageBlockPublicAccessEnabl } } +type ImageReferenceOptionName string + +// Enum values for ImageReferenceOptionName +const ( + ImageReferenceOptionNameStateName ImageReferenceOptionName = "state-name" + ImageReferenceOptionNameVersionDepth ImageReferenceOptionName = "version-depth" +) + +// Values returns all known values for ImageReferenceOptionName. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (ImageReferenceOptionName) Values() []ImageReferenceOptionName { + return []ImageReferenceOptionName{ + "state-name", + "version-depth", + } +} + +type ImageReferenceResourceType string + +// Enum values for ImageReferenceResourceType +const ( + ImageReferenceResourceTypeEc2Instance ImageReferenceResourceType = "ec2:Instance" + ImageReferenceResourceTypeEc2LaunchTemplate ImageReferenceResourceType = "ec2:LaunchTemplate" + ImageReferenceResourceTypeSsmParameter ImageReferenceResourceType = "ssm:Parameter" + ImageReferenceResourceTypeImageBuilderImageRecipe ImageReferenceResourceType = "imagebuilder:ImageRecipe" + ImageReferenceResourceTypeImageBuilderContainerRecipe ImageReferenceResourceType = "imagebuilder:ContainerRecipe" +) + +// Values returns all known values for ImageReferenceResourceType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ImageReferenceResourceType) Values() []ImageReferenceResourceType { + return []ImageReferenceResourceType{ + "ec2:Instance", + "ec2:LaunchTemplate", + "ssm:Parameter", + "imagebuilder:ImageRecipe", + "imagebuilder:ContainerRecipe", + } +} + type ImageState string // Enum values for ImageState @@ -2986,6 +3112,25 @@ func (ImdsSupportValues) Values() []ImdsSupportValues { } } +type InitializationType string + +// Enum values for InitializationType +const ( + InitializationTypeDefault InitializationType = "default" + InitializationTypeProvisionedRate InitializationType = "provisioned-rate" +) + +// Values returns all known values for InitializationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (InitializationType) Values() []InitializationType { + return []InitializationType{ + "default", + "provisioned-rate", + } +} + type InstanceAttributeName string // Enum values for InstanceAttributeName @@ -3313,6 +3458,26 @@ func (InstanceMetadataTagsState) Values() []InstanceMetadataTagsState { } } +type InstanceRebootMigrationState string + +// Enum values for InstanceRebootMigrationState +const ( + InstanceRebootMigrationStateDisabled InstanceRebootMigrationState = "disabled" + InstanceRebootMigrationStateDefault InstanceRebootMigrationState = "default" +) + +// Values returns all known values for InstanceRebootMigrationState. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (InstanceRebootMigrationState) Values() []InstanceRebootMigrationState { + return []InstanceRebootMigrationState{ + "disabled", + "default", + } +} + type InstanceStateName string // Enum values for InstanceStateName @@ -4254,6 +4419,75 @@ const ( InstanceTypeF212xlarge InstanceType = "f2.12xlarge" InstanceTypeF248xlarge InstanceType = "f2.48xlarge" InstanceTypeTrn248xlarge InstanceType = "trn2.48xlarge" + InstanceTypeC7iFlex12xlarge InstanceType = "c7i-flex.12xlarge" + InstanceTypeC7iFlex16xlarge InstanceType = "c7i-flex.16xlarge" + InstanceTypeM7iFlex12xlarge InstanceType = "m7i-flex.12xlarge" + InstanceTypeM7iFlex16xlarge InstanceType = "m7i-flex.16xlarge" + InstanceTypeI7ieMetal24xl InstanceType = "i7ie.metal-24xl" + InstanceTypeI7ieMetal48xl InstanceType = "i7ie.metal-48xl" + InstanceTypeI8g48xlarge InstanceType = "i8g.48xlarge" + InstanceTypeC8gdMedium InstanceType = "c8gd.medium" + InstanceTypeC8gdLarge InstanceType = "c8gd.large" + InstanceTypeC8gdXlarge InstanceType = "c8gd.xlarge" + InstanceTypeC8gd2xlarge InstanceType = "c8gd.2xlarge" + InstanceTypeC8gd4xlarge InstanceType = "c8gd.4xlarge" + InstanceTypeC8gd8xlarge InstanceType = "c8gd.8xlarge" + InstanceTypeC8gd12xlarge InstanceType = "c8gd.12xlarge" + InstanceTypeC8gd16xlarge InstanceType = "c8gd.16xlarge" + InstanceTypeC8gd24xlarge InstanceType = "c8gd.24xlarge" + InstanceTypeC8gd48xlarge InstanceType = "c8gd.48xlarge" + InstanceTypeC8gdMetal24xl InstanceType = "c8gd.metal-24xl" + InstanceTypeC8gdMetal48xl InstanceType = "c8gd.metal-48xl" + InstanceTypeI7iLarge InstanceType = "i7i.large" + InstanceTypeI7iXlarge InstanceType = "i7i.xlarge" + InstanceTypeI7i2xlarge InstanceType = "i7i.2xlarge" + InstanceTypeI7i4xlarge InstanceType = "i7i.4xlarge" + InstanceTypeI7i8xlarge InstanceType = "i7i.8xlarge" + InstanceTypeI7i12xlarge InstanceType = "i7i.12xlarge" + InstanceTypeI7i16xlarge InstanceType = "i7i.16xlarge" + InstanceTypeI7i24xlarge InstanceType = "i7i.24xlarge" + InstanceTypeI7i48xlarge InstanceType = "i7i.48xlarge" + InstanceTypeI7iMetal24xl InstanceType = "i7i.metal-24xl" + InstanceTypeI7iMetal48xl InstanceType = "i7i.metal-48xl" + InstanceTypeP6B20048xlarge InstanceType = "p6-b200.48xlarge" + InstanceTypeM8gdMedium InstanceType = "m8gd.medium" + InstanceTypeM8gdLarge InstanceType = "m8gd.large" + InstanceTypeM8gdXlarge InstanceType = "m8gd.xlarge" + InstanceTypeM8gd2xlarge InstanceType = "m8gd.2xlarge" + InstanceTypeM8gd4xlarge InstanceType = "m8gd.4xlarge" + InstanceTypeM8gd8xlarge InstanceType = "m8gd.8xlarge" + InstanceTypeM8gd12xlarge InstanceType = "m8gd.12xlarge" + InstanceTypeM8gd16xlarge InstanceType = "m8gd.16xlarge" + InstanceTypeM8gd24xlarge InstanceType = "m8gd.24xlarge" + InstanceTypeM8gd48xlarge InstanceType = "m8gd.48xlarge" + InstanceTypeM8gdMetal24xl InstanceType = "m8gd.metal-24xl" + InstanceTypeM8gdMetal48xl InstanceType = "m8gd.metal-48xl" + InstanceTypeR8gdMedium InstanceType = "r8gd.medium" + InstanceTypeR8gdLarge InstanceType = "r8gd.large" + InstanceTypeR8gdXlarge InstanceType = "r8gd.xlarge" + InstanceTypeR8gd2xlarge InstanceType = "r8gd.2xlarge" + InstanceTypeR8gd4xlarge InstanceType = "r8gd.4xlarge" + InstanceTypeR8gd8xlarge InstanceType = "r8gd.8xlarge" + InstanceTypeR8gd12xlarge InstanceType = "r8gd.12xlarge" + InstanceTypeR8gd16xlarge InstanceType = "r8gd.16xlarge" + InstanceTypeR8gd24xlarge InstanceType = "r8gd.24xlarge" + InstanceTypeR8gd48xlarge InstanceType = "r8gd.48xlarge" + InstanceTypeR8gdMetal24xl InstanceType = "r8gd.metal-24xl" + InstanceTypeR8gdMetal48xl InstanceType = 
"r8gd.metal-48xl" + InstanceTypeC8gnMedium InstanceType = "c8gn.medium" + InstanceTypeC8gnLarge InstanceType = "c8gn.large" + InstanceTypeC8gnXlarge InstanceType = "c8gn.xlarge" + InstanceTypeC8gn2xlarge InstanceType = "c8gn.2xlarge" + InstanceTypeC8gn4xlarge InstanceType = "c8gn.4xlarge" + InstanceTypeC8gn8xlarge InstanceType = "c8gn.8xlarge" + InstanceTypeC8gn12xlarge InstanceType = "c8gn.12xlarge" + InstanceTypeC8gn16xlarge InstanceType = "c8gn.16xlarge" + InstanceTypeC8gn24xlarge InstanceType = "c8gn.24xlarge" + InstanceTypeC8gn48xlarge InstanceType = "c8gn.48xlarge" + InstanceTypeC8gnMetal24xl InstanceType = "c8gn.metal-24xl" + InstanceTypeC8gnMetal48xl InstanceType = "c8gn.metal-48xl" + InstanceTypeF26xlarge InstanceType = "f2.6xlarge" + InstanceTypeP6eGb20036xlarge InstanceType = "p6e-gb200.36xlarge" ) // Values returns all known values for InstanceType. Note that this can be @@ -5152,6 +5386,75 @@ func (InstanceType) Values() []InstanceType { "f2.12xlarge", "f2.48xlarge", "trn2.48xlarge", + "c7i-flex.12xlarge", + "c7i-flex.16xlarge", + "m7i-flex.12xlarge", + "m7i-flex.16xlarge", + "i7ie.metal-24xl", + "i7ie.metal-48xl", + "i8g.48xlarge", + "c8gd.medium", + "c8gd.large", + "c8gd.xlarge", + "c8gd.2xlarge", + "c8gd.4xlarge", + "c8gd.8xlarge", + "c8gd.12xlarge", + "c8gd.16xlarge", + "c8gd.24xlarge", + "c8gd.48xlarge", + "c8gd.metal-24xl", + "c8gd.metal-48xl", + "i7i.large", + "i7i.xlarge", + "i7i.2xlarge", + "i7i.4xlarge", + "i7i.8xlarge", + "i7i.12xlarge", + "i7i.16xlarge", + "i7i.24xlarge", + "i7i.48xlarge", + "i7i.metal-24xl", + "i7i.metal-48xl", + "p6-b200.48xlarge", + "m8gd.medium", + "m8gd.large", + "m8gd.xlarge", + "m8gd.2xlarge", + "m8gd.4xlarge", + "m8gd.8xlarge", + "m8gd.12xlarge", + "m8gd.16xlarge", + "m8gd.24xlarge", + "m8gd.48xlarge", + "m8gd.metal-24xl", + "m8gd.metal-48xl", + "r8gd.medium", + "r8gd.large", + "r8gd.xlarge", + "r8gd.2xlarge", + "r8gd.4xlarge", + "r8gd.8xlarge", + "r8gd.12xlarge", + "r8gd.16xlarge", + "r8gd.24xlarge", + "r8gd.48xlarge", + "r8gd.metal-24xl", + "r8gd.metal-48xl", + "c8gn.medium", + "c8gn.large", + "c8gn.xlarge", + "c8gn.2xlarge", + "c8gn.4xlarge", + "c8gn.8xlarge", + "c8gn.12xlarge", + "c8gn.16xlarge", + "c8gn.24xlarge", + "c8gn.48xlarge", + "c8gn.metal-24xl", + "c8gn.metal-48xl", + "f2.6xlarge", + "p6e-gb200.36xlarge", } } @@ -6489,6 +6792,68 @@ func (LogDestinationType) Values() []LogDestinationType { } } +type MacModificationTaskState string + +// Enum values for MacModificationTaskState +const ( + MacModificationTaskStateSuccessful MacModificationTaskState = "successful" + MacModificationTaskStateFailed MacModificationTaskState = "failed" + MacModificationTaskStateInprogress MacModificationTaskState = "in-progress" + MacModificationTaskStatePending MacModificationTaskState = "pending" +) + +// Values returns all known values for MacModificationTaskState. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (MacModificationTaskState) Values() []MacModificationTaskState { + return []MacModificationTaskState{ + "successful", + "failed", + "in-progress", + "pending", + } +} + +type MacModificationTaskType string + +// Enum values for MacModificationTaskType +const ( + MacModificationTaskTypeSIPModification MacModificationTaskType = "sip-modification" + MacModificationTaskTypeVolumeOwnershipDelegation MacModificationTaskType = "volume-ownership-delegation" +) + +// Values returns all known values for MacModificationTaskType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (MacModificationTaskType) Values() []MacModificationTaskType { + return []MacModificationTaskType{ + "sip-modification", + "volume-ownership-delegation", + } +} + +type MacSystemIntegrityProtectionSettingStatus string + +// Enum values for MacSystemIntegrityProtectionSettingStatus +const ( + MacSystemIntegrityProtectionSettingStatusEnabled MacSystemIntegrityProtectionSettingStatus = "enabled" + MacSystemIntegrityProtectionSettingStatusDisabled MacSystemIntegrityProtectionSettingStatus = "disabled" +) + +// Values returns all known values for MacSystemIntegrityProtectionSettingStatus. +// Note that this can be expanded in the future, and so it is only as up to date as +// the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (MacSystemIntegrityProtectionSettingStatus) Values() []MacSystemIntegrityProtectionSettingStatus { + return []MacSystemIntegrityProtectionSettingStatus{ + "enabled", + "disabled", + } +} + type ManagedBy string // Enum values for ManagedBy @@ -7316,6 +7681,46 @@ func (ProtocolValue) Values() []ProtocolValue { } } +type PublicIpDnsOption string + +// Enum values for PublicIpDnsOption +const ( + PublicIpDnsOptionPublicDualStackDnsName PublicIpDnsOption = "public-dual-stack-dns-name" + PublicIpDnsOptionPublicIpv4DnsName PublicIpDnsOption = "public-ipv4-dns-name" + PublicIpDnsOptionPublicIpv6DnsName PublicIpDnsOption = "public-ipv6-dns-name" +) + +// Values returns all known values for PublicIpDnsOption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (PublicIpDnsOption) Values() []PublicIpDnsOption { + return []PublicIpDnsOption{ + "public-dual-stack-dns-name", + "public-ipv4-dns-name", + "public-ipv6-dns-name", + } +} + +type RebootMigrationSupport string + +// Enum values for RebootMigrationSupport +const ( + RebootMigrationSupportUnsupported RebootMigrationSupport = "unsupported" + RebootMigrationSupportSupported RebootMigrationSupport = "supported" +) + +// Values returns all known values for RebootMigrationSupport. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (RebootMigrationSupport) Values() []RebootMigrationSupport { + return []RebootMigrationSupport{ + "unsupported", + "supported", + } +} + type RecurringChargeFrequency string // Enum values for RecurringChargeFrequency @@ -7559,6 +7964,7 @@ const ( ResourceTypeFpgaImage ResourceType = "fpga-image" ResourceTypeHostReservation ResourceType = "host-reservation" ResourceTypeImage ResourceType = "image" + ResourceTypeImageUsageReport ResourceType = "image-usage-report" ResourceTypeImportImageTask ResourceType = "import-image-task" ResourceTypeImportSnapshotTask ResourceType = "import-snapshot-task" ResourceTypeInstance ResourceType = "instance" @@ -7636,6 +8042,8 @@ const ( ResourceTypeInstanceConnectEndpoint ResourceType = "instance-connect-endpoint" ResourceTypeVerifiedAccessEndpointTarget ResourceType = "verified-access-endpoint-target" ResourceTypeIpamExternalResourceVerificationToken ResourceType = "ipam-external-resource-verification-token" + ResourceTypeCapacityBlock ResourceType = "capacity-block" + ResourceTypeMacModificationTask ResourceType = "mac-modification-task" ) // Values returns all known values for ResourceType. Note that this can be @@ -7661,6 +8069,7 @@ func (ResourceType) Values() []ResourceType { "fpga-image", "host-reservation", "image", + "image-usage-report", "import-image-task", "import-snapshot-task", "instance", @@ -7738,6 +8147,8 @@ func (ResourceType) Values() []ResourceType { "instance-connect-endpoint", "verified-access-endpoint-target", "ipam-external-resource-verification-token", + "capacity-block", + "mac-modification-task", } } @@ -7790,6 +8201,7 @@ const ( RouteOriginCreateRouteTable RouteOrigin = "CreateRouteTable" RouteOriginCreateRoute RouteOrigin = "CreateRoute" RouteOriginEnableVgwRoutePropagation RouteOrigin = "EnableVgwRoutePropagation" + RouteOriginAdvertisement RouteOrigin = "Advertisement" ) // Values returns all known values for RouteOrigin. Note that this can be expanded @@ -7801,6 +8213,7 @@ func (RouteOrigin) Values() []RouteOrigin { "CreateRouteTable", "CreateRoute", "EnableVgwRoutePropagation", + "Advertisement", } } @@ -8079,6 +8492,7 @@ type RouteState string const ( RouteStateActive RouteState = "active" RouteStateBlackhole RouteState = "blackhole" + RouteStateFiltered RouteState = "filtered" ) // Values returns all known values for RouteState. Note that this can be expanded @@ -8089,6 +8503,7 @@ func (RouteState) Values() []RouteState { return []RouteState{ "active", "blackhole", + "filtered", } } @@ -8412,6 +8827,31 @@ func (SnapshotLocationEnum) Values() []SnapshotLocationEnum { } } +type SnapshotReturnCodes string + +// Enum values for SnapshotReturnCodes +const ( + SnapshotReturnCodesSuccess SnapshotReturnCodes = "success" + SnapshotReturnCodesWarnSkipped SnapshotReturnCodes = "skipped" + SnapshotReturnCodesErrorMissingPermissions SnapshotReturnCodes = "missing-permissions" + SnapshotReturnCodesErrorCodeInternalError SnapshotReturnCodes = "internal-error" + SnapshotReturnCodesErrorCodeClientError SnapshotReturnCodes = "client-error" +) + +// Values returns all known values for SnapshotReturnCodes. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (SnapshotReturnCodes) Values() []SnapshotReturnCodes { + return []SnapshotReturnCodes{ + "success", + "skipped", + "missing-permissions", + "internal-error", + "client-error", + } +} + type SnapshotState string // Enum values for SnapshotState @@ -8769,9 +9209,11 @@ type SubnetState string // Enum values for SubnetState const ( - SubnetStatePending SubnetState = "pending" - SubnetStateAvailable SubnetState = "available" - SubnetStateUnavailable SubnetState = "unavailable" + SubnetStatePending SubnetState = "pending" + SubnetStateAvailable SubnetState = "available" + SubnetStateUnavailable SubnetState = "unavailable" + SubnetStateFailed SubnetState = "failed" + SubnetStateFailedInsufficientCapacity SubnetState = "failed-insufficient-capacity" ) // Values returns all known values for SubnetState. Note that this can be expanded @@ -8783,6 +9225,8 @@ func (SubnetState) Values() []SubnetState { "pending", "available", "unavailable", + "failed", + "failed-insufficient-capacity", } } @@ -8995,6 +9439,27 @@ func (TrafficDirection) Values() []TrafficDirection { } } +type TrafficIpAddressType string + +// Enum values for TrafficIpAddressType +const ( + TrafficIpAddressTypeIpv4 TrafficIpAddressType = "ipv4" + TrafficIpAddressTypeIpv6 TrafficIpAddressType = "ipv6" + TrafficIpAddressTypeDualStack TrafficIpAddressType = "dual-stack" +) + +// Values returns all known values for TrafficIpAddressType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TrafficIpAddressType) Values() []TrafficIpAddressType { + return []TrafficIpAddressType{ + "ipv4", + "ipv6", + "dual-stack", + } +} + type TrafficMirrorFilterRuleField string // Enum values for TrafficMirrorFilterRuleField @@ -9171,6 +9636,7 @@ const ( TransitGatewayAttachmentResourceTypeConnect TransitGatewayAttachmentResourceType = "connect" TransitGatewayAttachmentResourceTypePeering TransitGatewayAttachmentResourceType = "peering" TransitGatewayAttachmentResourceTypeTgwPeering TransitGatewayAttachmentResourceType = "tgw-peering" + TransitGatewayAttachmentResourceTypeNetworkFunction TransitGatewayAttachmentResourceType = "network-function" ) // Values returns all known values for TransitGatewayAttachmentResourceType. Note @@ -9186,6 +9652,7 @@ func (TransitGatewayAttachmentResourceType) Values() []TransitGatewayAttachmentR "connect", "peering", "tgw-peering", + "network-function", } } @@ -9915,6 +10382,7 @@ const ( VolumeStatusInfoStatusOk VolumeStatusInfoStatus = "ok" VolumeStatusInfoStatusImpaired VolumeStatusInfoStatus = "impaired" VolumeStatusInfoStatusInsufficientData VolumeStatusInfoStatus = "insufficient-data" + VolumeStatusInfoStatusWarning VolumeStatusInfoStatus = "warning" ) // Values returns all known values for VolumeStatusInfoStatus. Note that this can @@ -9926,6 +10394,7 @@ func (VolumeStatusInfoStatus) Values() []VolumeStatusInfoStatus { "ok", "impaired", "insufficient-data", + "warning", } } @@ -9933,8 +10402,9 @@ type VolumeStatusName string // Enum values for VolumeStatusName const ( - VolumeStatusNameIoEnabled VolumeStatusName = "io-enabled" - VolumeStatusNameIoPerformance VolumeStatusName = "io-performance" + VolumeStatusNameIoEnabled VolumeStatusName = "io-enabled" + VolumeStatusNameIoPerformance VolumeStatusName = "io-performance" + VolumeStatusNameInitializationState VolumeStatusName = "initialization-state" ) // Values returns all known values for VolumeStatusName. 
Note that this can be @@ -9945,6 +10415,7 @@ func (VolumeStatusName) Values() []VolumeStatusName { return []VolumeStatusName{ "io-enabled", "io-performance", + "initialization-state", } } @@ -10349,6 +10820,27 @@ func (VpnStaticRouteSource) Values() []VpnStaticRouteSource { } } +type VpnTunnelProvisioningStatus string + +// Enum values for VpnTunnelProvisioningStatus +const ( + VpnTunnelProvisioningStatusAvailable VpnTunnelProvisioningStatus = "available" + VpnTunnelProvisioningStatusPending VpnTunnelProvisioningStatus = "pending" + VpnTunnelProvisioningStatusFailed VpnTunnelProvisioningStatus = "failed" +) + +// Values returns all known values for VpnTunnelProvisioningStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (VpnTunnelProvisioningStatus) Values() []VpnTunnelProvisioningStatus { + return []VpnTunnelProvisioningStatus{ + "available", + "pending", + "failed", + } +} + type WeekDay string // Enum values for WeekDay diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go index 07eb97f2d..d2f1ab071 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go @@ -155,6 +155,40 @@ type ActiveInstance struct { noSmithyDocumentSerde } +// Contains information about the current security configuration of an active VPN +// tunnel. +type ActiveVpnTunnelStatus struct { + + // The version of the Internet Key Exchange (IKE) protocol being used. + IkeVersion *string + + // The Diffie-Hellman group number being used in Phase 1 IKE negotiations. + Phase1DHGroup *int32 + + // The encryption algorithm negotiated in Phase 1 IKE negotiations. + Phase1EncryptionAlgorithm *string + + // The integrity algorithm negotiated in Phase 1 IKE negotiations. + Phase1IntegrityAlgorithm *string + + // The Diffie-Hellman group number being used in Phase 2 IKE negotiations. + Phase2DHGroup *int32 + + // The encryption algorithm negotiated in Phase 2 IKE negotiations. + Phase2EncryptionAlgorithm *string + + // The integrity algorithm negotiated in Phase 2 IKE negotiations. + Phase2IntegrityAlgorithm *string + + // The current provisioning status of the VPN tunnel. + ProvisioningStatus VpnTunnelProvisioningStatus + + // The reason for the current provisioning status. + ProvisioningStatusReason *string + + noSmithyDocumentSerde +} + // Describes a principal. type AddedPrincipal struct { @@ -329,6 +363,9 @@ type Address struct { // The only option supported today is alb . ServiceManaged ServiceManaged + // The ID of the subnet where the IP address is allocated. + SubnetId *string + // Any tags assigned to the Elastic IP address. Tags []Tag @@ -1256,6 +1293,54 @@ type CapacityAllocation struct { noSmithyDocumentSerde } +// Reserve powerful GPU instances on a future date to support your short duration +// machine learning (ML) workloads. Instances that run inside a Capacity Block are +// automatically placed close together inside [Amazon EC2 UltraClusters], for low-latency, petabit-scale, +// non-blocking networking. +// +// You can also reserve Amazon EC2 UltraServers. UltraServers connect multiple EC2 +// instances using a low-latency, high-bandwidth accelerator interconnect +// (NeuronLink). They are built to tackle very large-scale AI/ML workloads that +// require significant processing power. 
For more information, see Amazon EC2 +// UltraServers. +// +// [Amazon EC2 UltraClusters]: http://aws.amazon.com/ec2/ultraclusters/ +type CapacityBlock struct { + + // The Availability Zone of the Capacity Block. + AvailabilityZone *string + + // The Availability Zone ID of the Capacity Block. + AvailabilityZoneId *string + + // The ID of the Capacity Block. + CapacityBlockId *string + + // The ID of the Capacity Reservation. + CapacityReservationIds []string + + // The date and time at which the Capacity Block was created. + CreateDate *time.Time + + // The date and time at which the Capacity Block expires. When a Capacity Block + // expires, all instances in the Capacity Block are terminated. + EndDate *time.Time + + // The date and time at which the Capacity Block was started. + StartDate *time.Time + + // The state of the Capacity Block. + State CapacityBlockResourceState + + // The tags assigned to the Capacity Block. + Tags []Tag + + // The EC2 UltraServer type of the Capacity Block. + UltraserverType *string + + noSmithyDocumentSerde +} + // Describes a Capacity Block extension. With an extension, you can extend the // duration of time for an existing Capacity Block. type CapacityBlockExtension struct { @@ -1405,12 +1490,52 @@ type CapacityBlockOffering struct { // The tenancy of the Capacity Block. Tenancy CapacityReservationTenancy + // The number of EC2 UltraServers in the offering. + UltraserverCount *int32 + + // The EC2 UltraServer type of the Capacity Block offering. + UltraserverType *string + // The total price to be paid up front. UpfrontFee *string noSmithyDocumentSerde } +// Describes the availability of capacity for a Capacity Block. +type CapacityBlockStatus struct { + + // The ID of the Capacity Block. + CapacityBlockId *string + + // The availability of capacity for the Capacity Block reservations. + CapacityReservationStatuses []CapacityReservationStatus + + // The status of the high-bandwidth accelerator interconnect. Possible states + // include: + // + // - ok the accelerator interconnect is healthy. + // + // - impaired - accelerator interconnect communication is impaired. + // + // - insufficient-data - insufficient data to determine accelerator interconnect + // status. + InterconnectStatus CapacityBlockInterconnectStatus + + // The remaining capacity. Indicates the number of resources that can be launched + // into the Capacity Block. + TotalAvailableCapacity *int32 + + // The combined amount of Available and Unavailable capacity in the Capacity Block. + TotalCapacity *int32 + + // The unavailable capacity. Indicates the instance capacity that is unavailable + // for use due to a system status check failure. + TotalUnavailableCapacity *int32 + + noSmithyDocumentSerde +} + // Describes a Capacity Reservation. type CapacityReservation struct { @@ -1427,6 +1552,9 @@ type CapacityReservation struct { // Information about instance capacity usage. CapacityAllocations []CapacityAllocation + // The ID of the Capacity Block. + CapacityBlockId *string + // The Amazon Resource Name (ARN) of the Capacity Reservation. CapacityReservationArn *string @@ -1891,6 +2019,27 @@ type CapacityReservationSpecificationResponse struct { noSmithyDocumentSerde } +// Describes the availability of capacity for a Capacity Reservation. +type CapacityReservationStatus struct { + + // The ID of the Capacity Reservation. + CapacityReservationId *string + + // The remaining capacity. Indicates the amount of resources that can be launched + // into the Capacity Reservation. 
+ TotalAvailableCapacity *int32 + + // The combined amount of Available and Unavailable capacity in the Capacity + // Reservation. + TotalCapacity *int32 + + // The used capacity. Indicates that the capacity is in use by resources that are + // running in the Capacity Reservation. + TotalUnavailableCapacity *int32 + + noSmithyDocumentSerde +} + // Describes a target Capacity Reservation or Capacity Reservation group. type CapacityReservationTarget struct { @@ -2235,6 +2384,12 @@ type ClientVpnConnection struct { // The IP address of the client. ClientIp *string + // The IPv6 address assigned to the client connection when using a dual-stack + // Client VPN endpoint. This field is only populated when the endpoint is + // configured for dual-stack addressing, and the client is using IPv6 for + // connectivity. + ClientIpv6Address *string + // The ID of the Client VPN endpoint to which the client is connected. ClientVpnEndpointId *string @@ -2357,6 +2512,11 @@ type ClientVpnEndpoint struct { // Information about the DNS servers to be used for DNS resolution. DnsServers []string + // The IP address type of the Client VPN endpoint. Possible values are ipv4 for + // IPv4 addressing only, ipv6 for IPv6 addressing only, or dual-stack for both + // IPv4 and IPv6 addressing. + EndpointIpAddressType EndpointIpAddressType + // The IDs of the security groups for the target network. SecurityGroupIds []string @@ -2387,6 +2547,11 @@ type ClientVpnEndpoint struct { // Any tags assigned to the Client VPN endpoint. Tags []Tag + // The IP address type of the Client VPN endpoint. Possible values are either ipv4 + // for IPv4 addressing only, ipv6 for IPv6 addressing only, or dual-stack for both + // IPv4 and IPv6 addressing. + TrafficIpAddressType TrafficIpAddressType + // The transport protocol used by the Client VPN endpoint. TransportProtocol TransportProtocol @@ -3204,10 +3369,12 @@ type CustomerGateway struct { // The name of customer gateway device. DeviceName *string - // IPv4 address for the customer gateway device's outside interface. The address - // must be static. If OutsideIpAddressType in your VPN connection options is set - // to PrivateIpv4 , you can use an RFC6598 or RFC1918 private IPv4 address. If - // OutsideIpAddressType is set to PublicIpv4 , you can use a public IPv4 address. + // The IP address for the customer gateway device's outside interface. The + // address must be static. If OutsideIpAddressType in your VPN connection options + // is set to PrivateIpv4 , you can use an RFC6598 or RFC1918 private IPv4 address. + // If OutsideIpAddressType is set to PublicIpv4 , you can use a public IPv4 + // address. If OutsideIpAddressType is set to Ipv6 , you can use a public IPv6 + // address. IpAddress *string // The current state of the customer gateway ( pending | available | deleting | @@ -3405,6 +3572,38 @@ type DeleteQueuedReservedInstancesError struct { noSmithyDocumentSerde } +// The snapshot ID and its deletion result code. +type DeleteSnapshotReturnCode struct { + + // The result code from the snapshot deletion attempt. Possible values: + // + // - success - The snapshot was successfully deleted. + // + // - skipped - The snapshot was not deleted because it's associated with other + // AMIs. + // + // - missing-permissions - The snapshot was not deleted because the role lacks + // DeleteSnapshot permissions. For more information, see [How Amazon EBS works with IAM]. + // + // - internal-error - The snapshot was not deleted due to a server error. 
+ // + // - client-error - The snapshot was not deleted due to a client configuration + // error. + // + // For details about an error, check the DeleteSnapshot event in the CloudTrail + // event history. For more information, see [View event history]in the Amazon Web Services CloudTrail + // User Guide. + // + // [View event history]: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/tutorial-event-history.html + // [How Amazon EBS works with IAM]: https://docs.aws.amazon.com/ebs/latest/userguide/security_iam_service-with-iam.html + ReturnCode SnapshotReturnCodes + + // The ID of the snapshot. + SnapshotId *string + + noSmithyDocumentSerde +} + // Information about the tag keys to deregister for the current Region. You can // either specify individual tag keys or deregister all tag keys in the current // Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys in @@ -3894,6 +4093,40 @@ type DnsServersOptionsModifyStructure struct { // Describes a block device for an EBS volume. type EbsBlockDevice struct { + // The Availability Zone where the EBS volume will be created (for example, + // us-east-1a ). + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. + // If neither is specified, Amazon EC2 automatically selects an Availability Zone + // within the Region. + // + // This parameter is not supported when using [CreateFleet], [CreateImage], [DescribeImages], [RequestSpotFleet], [RequestSpotInstances], and [RunInstances]. + // + // [DescribeImages]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html + // [CreateFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html + // [RequestSpotInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html + // [RunInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html + // [CreateImage]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html + // [RequestSpotFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html + AvailabilityZone *string + + // The ID of the Availability Zone where the EBS volume will be created (for + // example, use1-az1 ). + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. + // If neither is specified, Amazon EC2 automatically selects an Availability Zone + // within the Region. + // + // This parameter is not supported when using [CreateFleet], [CreateImage], [DescribeImages], [RequestSpotFleet], [RequestSpotInstances], and [RunInstances]. + // + // [DescribeImages]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html + // [CreateFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html + // [RequestSpotInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html + // [RunInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html + // [CreateImage]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html + // [RequestSpotFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotFleet.html + AvailabilityZoneId *string + // Indicates whether the EBS volume is deleted on instance termination. For more // information, see [Preserving Amazon EBS volumes on instance termination]in the Amazon EC2 User Guide. 
// @@ -4104,7 +4337,8 @@ type EbsInfo struct { // Describes a parameter used to set up an EBS volume in a block device mapping. type EbsInstanceBlockDevice struct { - // The ARN of the Amazon ECS or Fargate task to which the volume is attached. + // The ARN of the Amazon Web Services-managed resource to which the volume is + // attached. AssociatedResource *string // The time stamp when the attachment initiated. @@ -4124,7 +4358,8 @@ type EbsInstanceBlockDevice struct { // The ID of the Amazon Web Services account that owns the volume. // - // This parameter is returned only for volumes that are attached to Fargate tasks. + // This parameter is returned only for volumes that are attached to Amazon Web + // Services-managed resources. VolumeOwnerId *string noSmithyDocumentSerde @@ -4208,7 +4443,8 @@ type Ec2InstanceConnectEndpoint struct { // The DNS name of the EC2 Instance Connect Endpoint. DnsName *string - // + // The Federal Information Processing Standards (FIPS) compliant DNS name of the + // EC2 Instance Connect Endpoint. FipsDnsName *string // The Amazon Resource Name (ARN) of the EC2 Instance Connect Endpoint. @@ -4217,6 +4453,9 @@ type Ec2InstanceConnectEndpoint struct { // The ID of the EC2 Instance Connect Endpoint. InstanceConnectEndpointId *string + // The IP address type of the endpoint. + IpAddressType IpAddressType + // The ID of the elastic network interface that Amazon EC2 automatically created // when creating the EC2 Instance Connect Endpoint. NetworkInterfaceIds []string @@ -4236,6 +4475,9 @@ type Ec2InstanceConnectEndpoint struct { // Default: true PreserveClientIp *bool + // The public DNS names of the endpoint. + PublicDnsNames *InstanceConnectEndpointPublicDnsNames + // The security groups associated with the endpoint. If you didn't specify a // security group, the default security group for your VPC is associated with the // endpoint. @@ -5301,11 +5543,16 @@ type FleetCapacityReservation struct { // Describes an EC2 Fleet. type FleetData struct { - // The progress of the EC2 Fleet. If there is an error, the status is error . After - // all requests are placed, the status is pending_fulfillment . If the size of the - // EC2 Fleet is equal to or greater than its target capacity, the status is - // fulfilled . If the size of the EC2 Fleet is decreased, the status is - // pending_termination while instances are terminating. + // The progress of the EC2 Fleet. + // + // For fleets of type instant , the status is fulfilled after all requests are + // placed, regardless of whether target capacity is met (this is the only possible + // status for instant fleets). + // + // For fleets of type request or maintain , the status is pending_fulfillment + // after all requests are placed, fulfilled when the fleet size meets or exceeds + // target capacity, pending_termination while instances are terminating when fleet + // size is decreased, and error if there's an error. ActivityStatus FleetActivityStatus // Unique, case-sensitive identifier that you provide to ensure the idempotency of @@ -6572,10 +6819,10 @@ type Image struct { // Any block device mapping entries. BlockDeviceMappings []BlockDeviceMapping - // The boot mode of the image. For more information, see [Boot modes] in the Amazon EC2 User + // The boot mode of the image. For more information, see [Instance launch behavior with Amazon EC2 boot modes] in the Amazon EC2 User // Guide. 
// - // [Boot modes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html + // [Instance launch behavior with Amazon EC2 boot modes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html BootMode BootModeValues // The date and time the image was created. @@ -6595,6 +6842,14 @@ type Image struct { // Specifies whether enhanced networking with ENA is enabled. EnaSupport *bool + // Indicates whether the image is eligible for Amazon Web Services Free Tier. + // + // - If true , the AMI is eligible for Free Tier and can be used to launch + // instances under the Free Tier limits. + // + // - If false , the AMI is not eligible for Free Tier. + FreeTierEligible *bool + // The hypervisor type of the image. Only xen is supported. ovm is not supported. Hypervisor HypervisorType @@ -6677,22 +6932,9 @@ type Image struct { RootDeviceType DeviceType // The ID of the source AMI from which the AMI was created. - // - // The ID only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The ID does not - // appear if the AMI was created using any other API. For some older AMIs, the ID - // might not be available. For more information, see [Identify the source AMI used to create a new AMI]in the Amazon EC2 User Guide. - // - // [Identify the source AMI used to create a new AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify-source-ami-used-to-create-new-ami.html SourceImageId *string // The Region of the source AMI. - // - // The Region only appears if the AMI was created using CreateImage, CopyImage, or CreateRestoreImageTask. The Region does - // not appear if the AMI was created using any other API. For some older AMIs, the - // Region might not be available. For more information, see [Identify the source AMI used to create a new AMI]in the Amazon EC2 User - // Guide. - // - // [Identify the source AMI used to create a new AMI]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify-source-ami-used-to-create-new-ami.html SourceImageRegion *string // The ID of the instance that the AMI was created from if the AMI was created @@ -6899,6 +7141,149 @@ type ImageRecycleBinInfo struct { noSmithyDocumentSerde } +// A resource that is referencing an image. +type ImageReference struct { + + // The Amazon Resource Name (ARN) of the resource referencing the image. + Arn *string + + // The ID of the referenced image. + ImageId *string + + // The type of resource referencing the image. + ResourceType ImageReferenceResourceType + + noSmithyDocumentSerde +} + +// The configuration and status of an image usage report. +type ImageUsageReport struct { + + // The IDs of the Amazon Web Services accounts that were specified when the report + // was created. + AccountIds []string + + // The date and time when the report was created. + CreationTime *time.Time + + // The date and time when Amazon EC2 will delete the report (30 days after the + // report was created). + ExpirationTime *time.Time + + // The ID of the image that was specified when the report was created. + ImageId *string + + // The ID of the report. + ReportId *string + + // The resource types that were specified when the report was created. + ResourceTypes []ImageUsageResourceType + + // The current state of the report. Possible values: + // + // - available - The report is available to view. + // + // - pending - The report is being created and not available to view. + // + // - error - The report could not be created. 
+ State *string + + // Provides additional details when the report is in an error state. + StateReason *string + + // Any tags assigned to the report. + Tags []Tag + + noSmithyDocumentSerde +} + +// A single entry in an image usage report, detailing how an image is being used +// by a specific Amazon Web Services account and resource type. +type ImageUsageReportEntry struct { + + // The ID of the account that uses the image. + AccountId *string + + // The ID of the image. + ImageId *string + + // The date and time the report creation was initiated. + ReportCreationTime *time.Time + + // The ID of the report. + ReportId *string + + // The type of resource ( ec2:Instance or ec2:LaunchTemplate ). + ResourceType *string + + // The number of times resources of this type reference this image in the account. + UsageCount *int64 + + noSmithyDocumentSerde +} + +// A resource type to include in the report. Associated options can also be +// specified if the resource type is a launch template. +type ImageUsageResourceType struct { + + // The resource type. + // + // Valid values: ec2:Instance | ec2:LaunchTemplate + ResourceType *string + + // The options that affect the scope of the report. Valid only when ResourceType + // is ec2:LaunchTemplate . + ResourceTypeOptions []ImageUsageResourceTypeOption + + noSmithyDocumentSerde +} + +// The options that affect the scope of the report. +type ImageUsageResourceTypeOption struct { + + // The name of the option. + OptionName *string + + // The number of launch template versions to check. + OptionValues []string + + noSmithyDocumentSerde +} + +// The options that affect the scope of the report. +type ImageUsageResourceTypeOptionRequest struct { + + // The name of the option. + // + // Valid value: version-depth - The number of launch template versions to check. + OptionName *string + + // A value for the specified option. + // + // Valid values: Integers between 1 and 10000 + // + // Default: 20 + OptionValues []string + + noSmithyDocumentSerde +} + +// A resource type to include in the report. Associated options can also be +// specified if the resource type is a launch template. +type ImageUsageResourceTypeRequest struct { + + // The resource type. + // + // Valid values: ec2:Instance | ec2:LaunchTemplate + ResourceType *string + + // The options that affect the scope of the report. Valid only when ResourceType + // is ec2:LaunchTemplate . + ResourceTypeOptions []ImageUsageResourceTypeOptionRequest + + noSmithyDocumentSerde +} + // The request information of license configurations. type ImportImageLicenseConfigurationRequest struct { @@ -7046,6 +7431,9 @@ type ImportInstanceVolumeDetailItem struct { // The Availability Zone where the resulting instance will reside. AvailabilityZone *string + // The ID of the Availability Zone where the resulting instance will reside. + AvailabilityZoneId *string + // The number of bytes converted so far. BytesConverted *int64 @@ -7091,6 +7479,9 @@ type ImportVolumeTaskDetails struct { // The Availability Zone where the resulting volume will reside. AvailabilityZone *string + // The ID of the Availability Zone where the resulting volume will reside. + AvailabilityZoneId *string + // The number of bytes converted so far. BytesConverted *int64 @@ -7152,6 +7543,34 @@ type InferenceDeviceMemoryInfo struct { noSmithyDocumentSerde } +// Information about the volume initialization. For more information, see [Initialize Amazon EBS volumes]. 
+// +// [Initialize Amazon EBS volumes]: https://docs.aws.amazon.com/ebs/latest/userguide/initalize-volume.html +type InitializationStatusDetails struct { + + // The estimated remaining time, in seconds, for volume initialization to + // complete. Returns 0 when volume initialization has completed. + // + // Only available for volumes created with Amazon EBS Provisioned Rate for Volume + // Initialization. + EstimatedTimeToCompleteInSeconds *int64 + + // The method used for volume initialization. Possible values include: + // + // - default - Volume initialized using the default volume initialization rate or + // fast snapshot restore. + // + // - provisioned-rate - Volume initialized using an Amazon EBS Provisioned Rate + // for Volume Initialization. + InitializationType InitializationType + + // The current volume initialization progress as a percentage (0-100). Returns 100 + // when volume initialization has completed. + Progress *int64 + + noSmithyDocumentSerde +} + // Describes an instance. type Instance struct { @@ -7177,6 +7596,12 @@ type Instance struct { // [Boot modes]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-boot.html BootMode BootModeValues + // The ID of the Capacity Block. + // + // For P5 instances, a Capacity Block ID refers to a group of instances. For Trn2u + // instances, a capacity block ID refers to an EC2 UltraServer. + CapacityBlockId *string + // The ID of the Capacity Reservation. CapacityReservationId *string @@ -7313,9 +7738,11 @@ type Instance struct { // The product codes attached to this instance, if applicable. ProductCodes []ProductCode - // [IPv4 only] The public DNS name assigned to the instance. This name is not - // available until the instance enters the running state. This name is only - // available if you've enabled DNS hostnames for your VPC. + // The public DNS name assigned to the instance. This name is not available until + // the instance enters the running state. This name is only available if you've + // enabled DNS hostnames for your VPC. The format of this name depends on the [public hostname type]. + // + // [public hostname type]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hostname-types.html#public-hostnames PublicDnsName *string // The public IPv4 address, or the Carrier IP address assigned to the instance, if @@ -7475,6 +7902,33 @@ type InstanceCapacity struct { noSmithyDocumentSerde } +// The DNS names of the endpoint. +type InstanceConnectEndpointDnsNames struct { + + // The DNS name of the EC2 Instance Connect Endpoint. + DnsName *string + + // The Federal Information Processing Standards (FIPS) compliant DNS name of the + // EC2 Instance Connect Endpoint. + FipsDnsName *string + + noSmithyDocumentSerde +} + +// The public DNS names of the endpoint, including IPv4-only and dualstack DNS +// names. +type InstanceConnectEndpointPublicDnsNames struct { + + // The dualstack DNS name of the EC2 Instance Connect Endpoint. A dualstack DNS + // name supports connections from both IPv4 and IPv6 clients. + Dualstack *InstanceConnectEndpointDnsNames + + // The IPv4-only DNS name of the EC2 Instance Connect Endpoint. + Ipv4 *InstanceConnectEndpointDnsNames + + noSmithyDocumentSerde +} + // Describes a Reserved Instance listing state. type InstanceCount struct { @@ -7767,6 +8221,22 @@ type InstanceMaintenanceOptions struct { // instance. 
AutoRecovery InstanceAutoRecoveryState + // Specifies whether to attempt reboot migration during a user-initiated reboot of + // an instance that has a scheduled system-reboot event: + // + // - default - Amazon EC2 attempts to migrate the instance to new hardware + // (reboot migration). If successful, the system-reboot event is cleared. If + // unsuccessful, an in-place reboot occurs and the event remains scheduled. + // + // - disabled - Amazon EC2 keeps the instance on the same hardware (in-place + // reboot). The system-reboot event remains scheduled. + // + // This setting only applies to supported instances that have a scheduled reboot + // event. For more information, see [Enable or disable reboot migration]in the Amazon EC2 User Guide. + // + // [Enable or disable reboot migration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/schedevents_actions_reboot.html#reboot-migration + RebootMigration InstanceRebootMigrationState + noSmithyDocumentSerde } @@ -7973,7 +8443,7 @@ type InstanceNetworkInterface struct { // The type of network interface. // - // Valid values: interface | efa | efa-only | trunk + // Valid values: interface | efa | efa-only | evs | trunk InterfaceType *string // The IPv4 delegated prefixes that are assigned to the network interface. @@ -9107,6 +9577,9 @@ type InstanceStatus struct { // The Availability Zone of the instance. AvailabilityZone *string + // The ID of the Availability Zone of the instance. + AvailabilityZoneId *string + // Any scheduled events associated with the instance. Events []InstanceStatusEvent @@ -9229,6 +9702,10 @@ type InstanceTopology struct { // The name of the Availability Zone or Local Zone that the instance is in. AvailabilityZone *string + // The ID of the Capacity Block. This parameter is only supported for Ultraserver + // instances and identifies instances within the Ultraserver domain. + CapacityBlockId *string + // The name of the placement group that the instance is in. GroupName *string @@ -9333,6 +9810,13 @@ type InstanceTypeInfo struct { // Describes the processor. ProcessorInfo *ProcessorInfo + // Indicates whether reboot migration during a user-initiated reboot is supported + // for instances that have a scheduled system-reboot event. For more information, + // see [Enable or disable reboot migration]in the Amazon EC2 User Guide. + // + // [Enable or disable reboot migration]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/schedevents_actions_reboot.html#reboot-migration + RebootMigrationSupport RebootMigrationSupport + // The supported boot modes. For more information, see [Boot modes] in the Amazon EC2 User // Guide. // @@ -11758,6 +12242,9 @@ type LaunchTemplatePlacement struct { // The Availability Zone of the instance. AvailabilityZone *string + // The ID of the Availability Zone of the instance. + AvailabilityZoneId *string + // The Group ID of the placement group. You must specify the Placement Group Group // ID to launch an instance in a shared placement group. GroupId *string @@ -11792,8 +12279,15 @@ type LaunchTemplatePlacementRequest struct { Affinity *string // The Availability Zone for the instance. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both AvailabilityZone *string + // The ID of the Availability Zone for the instance. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both + AvailabilityZoneId *string + // The Group Id of a placement group. 
You must specify the Placement Group Group // Id to launch an instance in a shared placement group. GroupId *string @@ -12423,6 +12917,94 @@ type MacHost struct { noSmithyDocumentSerde } +// Information about a System Integrity Protection (SIP) modification task or +// volume ownership delegation task for an Amazon EC2 Mac instance. +type MacModificationTask struct { + + // The ID of the Amazon EC2 Mac instance. + InstanceId *string + + // The ID of task. + MacModificationTaskId *string + + // [SIP modification tasks only] Information about the SIP configuration. + MacSystemIntegrityProtectionConfig *MacSystemIntegrityProtectionConfiguration + + // The date and time the task was created, in the UTC timezone ( + // YYYY-MM-DDThh:mm:ss.sssZ ). + StartTime *time.Time + + // The tags assigned to the task. + Tags []Tag + + // The state of the task. + TaskState MacModificationTaskState + + // The type of task. + TaskType MacModificationTaskType + + noSmithyDocumentSerde +} + +// Describes the configuration for a System Integrity Protection (SIP) +// modification task. +type MacSystemIntegrityProtectionConfiguration struct { + + // Indicates whether Apple Internal was enabled or disabled by the task. + AppleInternal MacSystemIntegrityProtectionSettingStatus + + // Indicates whether Base System was enabled or disabled by the task. + BaseSystem MacSystemIntegrityProtectionSettingStatus + + // Indicates whether Dtrace Restrictions was enabled or disabled by the task. + DTraceRestrictions MacSystemIntegrityProtectionSettingStatus + + // Indicates whether Debugging Restrictions was enabled or disabled by the task. + DebuggingRestrictions MacSystemIntegrityProtectionSettingStatus + + // Indicates whether Filesystem Protections was enabled or disabled by the task. + FilesystemProtections MacSystemIntegrityProtectionSettingStatus + + // Indicates whether Kext Signing was enabled or disabled by the task. + KextSigning MacSystemIntegrityProtectionSettingStatus + + // Indicates whether NVRAM Protections was enabled or disabled by the task. + NvramProtections MacSystemIntegrityProtectionSettingStatus + + // Indicates SIP was enabled or disabled by the task. + Status MacSystemIntegrityProtectionSettingStatus + + noSmithyDocumentSerde +} + +// Describes a custom configuration for a System Integrity Protection (SIP) +// modification task. +type MacSystemIntegrityProtectionConfigurationRequest struct { + + // Enables or disables Apple Internal. + AppleInternal MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Base System. + BaseSystem MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Dtrace Restrictions. + DTraceRestrictions MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Debugging Restrictions. + DebuggingRestrictions MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Filesystem Protections. + FilesystemProtections MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Kext Signing. + KextSigning MacSystemIntegrityProtectionSettingStatus + + // Enables or disables Nvram Protections. + NvramProtections MacSystemIntegrityProtectionSettingStatus + + noSmithyDocumentSerde +} + // Details for Site-to-Site VPN tunnel endpoint maintenance events. type MaintenanceDetails struct { @@ -12639,8 +13221,10 @@ type ModifyTransitGatewayOptions struct { // table. DefaultRouteTableAssociation DefaultRouteTableAssociationValue - // Enable or disable automatic propagation of routes to the default propagation - // route table. 
+ // Indicates whether resource attachments automatically propagate routes to the + // default propagation route table. Enabled by default. If + // defaultRouteTablePropagation is set to enable , Amazon Web Services Transit + // Gateway will create the default transit gateway route table. DefaultRouteTablePropagation DefaultRouteTablePropagationValue // Enable or disable DNS support. @@ -13555,6 +14139,9 @@ type NetworkInsightsPath struct { // Describes a network interface. type NetworkInterface struct { + // The subnets associated with this network interface. + AssociatedSubnets []string + // The association information for an Elastic IP address (IPv4) associated with // the network interface. Association *NetworkInterfaceAssociation @@ -13618,7 +14205,9 @@ type NetworkInterface struct { // The Amazon Web Services account ID of the owner of the network interface. OwnerId *string - // The private DNS name. + // The private hostname. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User Guide. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html PrivateDnsName *string // The IPv4 address of the network interface within the subnet. @@ -13627,6 +14216,17 @@ type NetworkInterface struct { // The private IPv4 addresses associated with the network interface. PrivateIpAddresses []NetworkInterfacePrivateIpAddress + // A public hostname. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User Guide. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html + PublicDnsName *string + + // Public hostname type options. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User + // Guide. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html + PublicIpDnsNameOptions *PublicIpDnsNameOptions + // The alias or Amazon Web Services account ID of the principal or service that // created the network interface. RequesterId *string @@ -13783,6 +14383,13 @@ type NetworkInterfaceIpv6Address struct { // [ModifyNetworkInterfaceAttribute]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyNetworkInterfaceAttribute.html IsPrimaryIpv6 *bool + // An IPv6-enabled public hostname for a network interface. Requests from within + // the VPC or from the internet resolve to the IPv6 GUA of the network interface. + // For more information, see [EC2 instance hostnames, DNS names, and domains]in the Amazon EC2 User Guide. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html + PublicIpv6DnsName *string + noSmithyDocumentSerde } @@ -14602,7 +15209,8 @@ type Placement struct { // The Availability Zone of the instance. // - // If not specified, an Availability Zone will be automatically chosen for you + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. + // If neither is specified, Amazon EC2 automatically selects an Availability Zone // based on the load balancing criteria for the Region. // // This parameter is not supported for [CreateFleet]. 
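The Placement changes above pair the existing AvailabilityZone field with a new AvailabilityZoneId, and only one of the two may be set on a request. A minimal sketch of how a caller of the updated EC2 client might use the new field is below; the client setup, the AMI ID, and the AZ ID ("use1-az1") are illustrative assumptions, not values taken from this patch.

```go
// Sketch only: shows the AvailabilityZone / AvailabilityZoneId choice on
// types.Placement introduced by this vendor bump. AMI and AZ ID values are
// hypothetical.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.NewFromConfig(cfg)

	// Set either AvailabilityZone or AvailabilityZoneId, never both; leaving
	// both unset lets EC2 pick a zone for the Region.
	placement := &types.Placement{
		AvailabilityZoneId: aws.String("use1-az1"), // hypothetical AZ ID
	}

	_, err = client.RunInstances(context.TODO(), &ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // hypothetical AMI
		InstanceType: types.InstanceTypeT3Micro,
		MinCount:     aws.Int32(1),
		MaxCount:     aws.Int32(1),
		Placement:    placement,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```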
@@ -14610,6 +15218,17 @@ type Placement struct { // [CreateFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet AvailabilityZone *string + // The ID of the Availability Zone of the instance. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both. + // If neither is specified, Amazon EC2 automatically selects an Availability Zone + // based on the load balancing criteria for the Region. + // + // This parameter is not supported for [CreateFleet]. + // + // [CreateFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet + AvailabilityZoneId *string + // The ID of the placement group that the instance is in. If you specify GroupId , // you can't specify GroupName . GroupId *string @@ -15038,6 +15657,37 @@ type PtrUpdateStatus struct { noSmithyDocumentSerde } +// Public hostname type options. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User +// Guide. +// +// [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html +type PublicIpDnsNameOptions struct { + + // The public hostname type. For more information, see [EC2 instance hostnames, DNS names, and domains] in the Amazon EC2 User + // Guide. + // + // [EC2 instance hostnames, DNS names, and domains]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-naming.html + DnsHostnameType *string + + // A dual-stack public hostname for a network interface. Requests from within the + // VPC resolve to both the private IPv4 address and the IPv6 Global Unicast Address + // of the network interface. Requests from the internet resolve to both the public + // IPv4 and the IPv6 GUA address of the network interface. + PublicDualStackDnsName *string + + // An IPv4-enabled public hostname for a network interface. Requests from within + // the VPC resolve to the private primary IPv4 address of the network interface. + // Requests from the internet resolve to the public IPv4 address of the network + // interface. + PublicIpv4DnsName *string + + // An IPv6-enabled public hostname for a network interface. Requests from within + // the VPC or from the internet resolve to the IPv6 GUA of the network interface. + PublicIpv6DnsName *string + + noSmithyDocumentSerde +} + // Describes an IPv4 address pool. type PublicIpv4Pool struct { @@ -15408,6 +16058,9 @@ type RequestLaunchTemplateData struct { // Deprecated. // // Amazon Elastic Graphics reached end of life on January 8, 2024. + // + // Deprecated: Specifying Elastic Graphics accelerators is no longer supported on + // the RunInstances API. ElasticGpuSpecifications []ElasticGpuSpecification // Amazon Elastic Inference is no longer available. @@ -15417,6 +16070,9 @@ type RequestLaunchTemplateData struct { // instances to accelerate your Deep Learning (DL) inference workloads. // // You cannot specify accelerators from different generations in the same request. + // + // Deprecated: Specifying Elastic Inference accelerators is no longer supported on + // the RunInstances API. ElasticInferenceAccelerators []LaunchTemplateElasticInferenceAccelerator // Indicates whether the instance is enabled for Amazon Web Services Nitro @@ -16080,6 +16736,54 @@ type ResourceStatementRequest struct { noSmithyDocumentSerde } +// The options that affect the scope of the response. +type ResourceTypeOption struct { + + // The name of the option. 
+ // + // - For ec2:Instance : + // + // Specify state-name - The current state of the EC2 instance. + // + // - For ec2:LaunchTemplate : + // + // Specify version-depth - The number of launch template versions to check, + // starting from the most recent version. + OptionName ImageReferenceOptionName + + // A value for the specified option. + // + // - For state-name : + // + // - Valid values: pending | running | shutting-down | terminated | stopping | + // stopped + // + // - Default: All states + // + // - For version-depth : + // + // - Valid values: Integers between 1 and 10000 + // + // - Default: 10 + OptionValues []string + + noSmithyDocumentSerde +} + +// A resource type to check for image references. Associated options can also be +// specified if the resource type is an EC2 instance or launch template. +type ResourceTypeRequest struct { + + // The resource type. + ResourceType ImageReferenceResourceType + + // The options that affect the scope of the response. Valid only when ResourceType + // is ec2:Instance or ec2:LaunchTemplate . + ResourceTypeOptions []ResourceTypeOption + + noSmithyDocumentSerde +} + // Describes the error that's returned when you cannot delete a launch template // version. type ResponseError struct { @@ -16314,6 +17018,10 @@ type Route struct { // The ID of Amazon Web Services account that owns the instance. InstanceOwnerId *string + // The next hop IP address for routes propagated by VPC Route Server into VPC + // route tables. + IpAddress *string + // The ID of the local gateway. LocalGatewayId *string @@ -16323,6 +17031,9 @@ type Route struct { // The ID of the network interface. NetworkInterfaceId *string + // The Amazon Resource Name (ARN) of the ODB network. + OdbNetworkArn *string + // Describes how the route was created. // // - CreateRouteTable - The route was automatically created when the route table @@ -16723,6 +17434,10 @@ type RouteTableAssociation struct { // Indicates whether this is the main route table. Main *bool + // The ID of a public IPv4 pool. A public IPv4 pool is a pool of IPv4 addresses + // that you've brought to Amazon Web Services with BYOIP. + PublicIpv4Pool *string + // The ID of the association. RouteTableAssociationId *string @@ -17472,6 +18187,9 @@ type SecurityGroupVpcAssociation struct { // The association's security group ID. GroupId *string + // The Amazon Web Services account ID of the owner of the security group. + GroupOwnerId *string + // The association's state. State SecurityGroupVpcAssociationState @@ -17494,7 +18212,14 @@ type ServiceConfiguration struct { // endpoint to the service must first be accepted. AcceptanceRequired *bool + // The IDs of the Availability Zones in which the service is available. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both + AvailabilityZoneIds []string + // The Availability Zones in which the service is available. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both AvailabilityZones []string // The DNS names for the service. @@ -17554,7 +18279,14 @@ type ServiceDetail struct { // accepted by the service owner. AcceptanceRequired *bool + // The IDs of the Availability Zones in which the service is available. + // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both + AvailabilityZoneIds []string + // The Availability Zones in which the service is available. 
+ // + // Either AvailabilityZone or AvailabilityZoneId can be specified, but not both AvailabilityZones []string // The DNS names for the service. @@ -17796,7 +18528,8 @@ type Snapshot struct { TransferType TransferType // The ID of the volume that was used to create the snapshot. Snapshots created by - // the CopySnapshotaction have an arbitrary volume ID that should not be used for any purpose. + // a copy snapshot operation have an arbitrary volume ID that you should not use + // for any purpose. VolumeId *string // The size of the volume, in GiB. @@ -18528,8 +19261,17 @@ type SpotInstanceRequest struct { LaunchSpecification *LaunchSpecification // The Availability Zone in which the request is launched. + // + // Either launchedAvailabilityZone or launchedAvailabilityZoneId can be specified, + // but not both LaunchedAvailabilityZone *string + // The ID of the Availability Zone in which the request is launched. + // + // Either launchedAvailabilityZone or launchedAvailabilityZoneId can be specified, + // but not both + LaunchedAvailabilityZoneId *string + // The product description associated with the Spot Instance. ProductDescription RIProductDescription @@ -18959,6 +19701,9 @@ type SpotPrice struct { // The Availability Zone. AvailabilityZone *string + // The ID of the Availability Zone. + AvailabilityZoneId *string + // The instance type. InstanceType InstanceType @@ -19201,6 +19946,12 @@ type Subnet struct { PrivateDnsNameOptionsOnLaunch *PrivateDnsNameOptionsOnLaunch // The current state of the subnet. + // + // - failed : The underlying infrastructure to support the subnet failed to + // provision as expected. + // + // - failed-insufficient-capacity : The underlying infrastructure to support the + // subnet failed to provision due to a shortage of EC2 instance capacity. State SubnetState // The Amazon Resource Name (ARN) of the subnet. @@ -19212,6 +19963,13 @@ type Subnet struct { // Any tags assigned to the subnet. Tags []Tag + // Indicates if this is a subnet used with Amazon Elastic VMware Service (EVS). + // Possible values are Elastic VMware Service or no value. For more information + // about Amazon EVS, see [Amazon Elastic VMware Service API Reference]. + // + // [Amazon Elastic VMware Service API Reference]: https://docs.aws.amazon.com/evs/latest/APIReference/Welcome.html + Type *string + // The ID of the VPC the subnet is in. VpcId *string @@ -20273,15 +21031,16 @@ type TransitGatewayOptions struct { AutoAcceptSharedAttachments AutoAcceptSharedAttachmentsValue // Indicates whether resource attachments are automatically associated with the - // default association route table. Enabled by default. If - // defaultRouteTableAssociation is set to enable , Amazon Web Services Transit - // Gateway will create the default transit gateway route table. + // default association route table. Enabled by default. Either + // defaultRouteTableAssociation or defaultRouteTablePropagation must be set to + // enable for Amazon Web Services Transit Gateway to create the default transit + // gateway route table. DefaultRouteTableAssociation DefaultRouteTableAssociationValue // Indicates whether resource attachments automatically propagate routes to the // default propagation route table. Enabled by default. If // defaultRouteTablePropagation is set to enable , Amazon Web Services Transit - // Gateway will create the default transit gateway route table. + // Gateway creates the default transit gateway route table. 
DefaultRouteTablePropagation DefaultRouteTablePropagationValue // Indicates whether DNS support is enabled. @@ -21749,6 +22508,9 @@ type Volume struct { // The Availability Zone for the volume. AvailabilityZone *string + // The ID of the Availability Zone for the volume. + AvailabilityZoneId *string + // The time stamp when volume creation was initiated. CreateTime *time.Time @@ -21816,7 +22578,8 @@ type Volume struct { // Describes volume attachment details. type VolumeAttachment struct { - // The ARN of the Amazon ECS or Fargate task to which the volume is attached. + // The ARN of the Amazon Web Services-managed resource to which the volume is + // attached. AssociatedResource *string // The time stamp when the attachment initiated. @@ -21827,18 +22590,21 @@ type VolumeAttachment struct { // The device name. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . Device *string // The ID of the instance. // - // If the volume is attached to a Fargate task, this parameter returns null . + // If the volume is attached to an Amazon Web Services-managed resource, this + // parameter returns null . InstanceId *string - // The service principal of Amazon Web Services service that owns the underlying - // instance to which the volume is attached. + // The service principal of the Amazon Web Services service that owns the + // underlying resource to which the volume is attached. // - // This parameter is returned only for volumes that are attached to Fargate tasks. + // This parameter is returned only for volumes that are attached to Amazon Web + // Services-managed resources. InstanceOwningService *string // The attachment state of the volume. @@ -21949,6 +22715,17 @@ type VolumeStatusAttachmentStatus struct { type VolumeStatusDetails struct { // The name of the volume status. + // + // - io-enabled - Indicates the volume I/O status. For more information, see [Amazon EBS volume status checks]. + // + // - io-performance - Indicates the volume performance status. For more + // information, see [Amazon EBS volume status checks]. + // + // - initialization-state - Indicates the status of the volume initialization + // process. For more information, see [Initialize Amazon EBS volumes]. + // + // [Amazon EBS volume status checks]: https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-checks.html + // [Initialize Amazon EBS volumes]: https://docs.aws.amazon.com/ebs/latest/userguide/initalize-volume.html Name VolumeStatusName // The intended status of the volume status. @@ -22011,6 +22788,17 @@ type VolumeStatusItem struct { // A list of events associated with the volume. Events []VolumeStatusEvent + // Information about the volume initialization. It can take up to 5 minutes for + // the volume initialization information to be updated. + // + // Only available for volumes created from snapshots. Not available for empty + // volumes created without a snapshot. + // + // For more information, see [Initialize Amazon EBS volumes]. + // + // [Initialize Amazon EBS volumes]: https://docs.aws.amazon.com/ebs/latest/userguide/initalize-volume.html + InitializationStatusDetails *InitializationStatusDetails + // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string @@ -22579,6 +23367,10 @@ type VpnConnection struct { // The VPN connection options. 
Options *VpnConnectionOptions + // The Amazon Resource Name (ARN) of the Secrets Manager secret storing the + // pre-shared key(s) for the VPN connection. + PreSharedKeyArn *string + // The static routes associated with the VPN connection. Routes []VpnStaticRoute @@ -22645,7 +23437,7 @@ type VpnConnectionOptions struct { // The type of IPv4 address assigned to the outside interface of the customer // gateway. // - // Valid values: PrivateIpv4 | PublicIpv4 + // Valid values: PrivateIpv4 | PublicIpv4 | Ipv6 // // Default: PublicIpv4 OutsideIpAddressType *string @@ -22690,10 +23482,10 @@ type VpnConnectionOptionsSpecification struct { // Default: ::/0 LocalIpv6NetworkCidr *string - // The type of IPv4 address assigned to the outside interface of the customer + // The type of IP address assigned to the outside interface of the customer // gateway device. // - // Valid values: PrivateIpv4 | PublicIpv4 + // Valid values: PrivateIpv4 | PublicIpv4 | Ipv6 // // Default: PublicIpv4 OutsideIpAddressType *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/validators.go index d7ee133fe..ea1103959 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/validators.go @@ -1270,21 +1270,21 @@ func (m *validateOpCreateCustomerGateway) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } -type validateOpCreateDefaultSubnet struct { +type validateOpCreateDelegateMacVolumeOwnershipTask struct { } -func (*validateOpCreateDefaultSubnet) ID() string { +func (*validateOpCreateDelegateMacVolumeOwnershipTask) ID() string { return "OperationInputValidation" } -func (m *validateOpCreateDefaultSubnet) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( +func (m *validateOpCreateDelegateMacVolumeOwnershipTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - input, ok := in.Parameters.(*CreateDefaultSubnetInput) + input, ok := in.Parameters.(*CreateDelegateMacVolumeOwnershipTaskInput) if !ok { return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) } - if err := validateOpCreateDefaultSubnetInput(input); err != nil { + if err := validateOpCreateDelegateMacVolumeOwnershipTaskInput(input); err != nil { return out, metadata, err } return next.HandleInitialize(ctx, in) @@ -1410,6 +1410,26 @@ func (m *validateOpCreateImage) HandleInitialize(ctx context.Context, in middlew return next.HandleInitialize(ctx, in) } +type validateOpCreateImageUsageReport struct { +} + +func (*validateOpCreateImageUsageReport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateImageUsageReport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateImageUsageReportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateImageUsageReportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateInstanceConnectEndpoint struct { } @@ -1690,6 +1710,26 @@ func (m *validateOpCreateLocalGatewayVirtualInterface) 
HandleInitialize(ctx cont return next.HandleInitialize(ctx, in) } +type validateOpCreateMacSystemIntegrityProtectionModificationTask struct { +} + +func (*validateOpCreateMacSystemIntegrityProtectionModificationTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateMacSystemIntegrityProtectionModificationTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMacSystemIntegrityProtectionModificationTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateMacSystemIntegrityProtectionModificationTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateManagedPrefixList struct { } @@ -2470,26 +2510,6 @@ func (m *validateOpCreateVerifiedAccessTrustProvider) HandleInitialize(ctx conte return next.HandleInitialize(ctx, in) } -type validateOpCreateVolume struct { -} - -func (*validateOpCreateVolume) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateVolume) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateVolumeInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateVolumeInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - type validateOpCreateVpcBlockPublicAccessExclusion struct { } @@ -2850,6 +2870,26 @@ func (m *validateOpDeleteFpgaImage) HandleInitialize(ctx context.Context, in mid return next.HandleInitialize(ctx, in) } +type validateOpDeleteImageUsageReport struct { +} + +func (*validateOpDeleteImageUsageReport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteImageUsageReport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteImageUsageReportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteImageUsageReportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteInstanceConnectEndpoint struct { } @@ -4550,6 +4590,26 @@ func (m *validateOpDescribeImageAttribute) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDescribeImageReferences struct { +} + +func (*validateOpDescribeImageReferences) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeImageReferences) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeImageReferencesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeImageReferencesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type 
validateOpDescribeInstanceAttribute struct { } @@ -5850,6 +5910,26 @@ func (m *validateOpExportVerifiedAccessInstanceClientConfiguration) HandleInitia return next.HandleInitialize(ctx, in) } +type validateOpGetActiveVpnTunnelStatus struct { +} + +func (*validateOpGetActiveVpnTunnelStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetActiveVpnTunnelStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetActiveVpnTunnelStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetActiveVpnTunnelStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetAssociatedEnclaveCertificateIamRoles struct { } @@ -7170,6 +7250,26 @@ func (m *validateOpModifyInstanceCapacityReservationAttributes) HandleInitialize return next.HandleInitialize(ctx, in) } +type validateOpModifyInstanceConnectEndpoint struct { +} + +func (*validateOpModifyInstanceConnectEndpoint) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpModifyInstanceConnectEndpoint) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ModifyInstanceConnectEndpointInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpModifyInstanceConnectEndpointInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpModifyInstanceCpuOptions struct { } @@ -7510,6 +7610,26 @@ func (m *validateOpModifyPrivateDnsNameOptions) HandleInitialize(ctx context.Con return next.HandleInitialize(ctx, in) } +type validateOpModifyPublicIpDnsNameOptions struct { +} + +func (*validateOpModifyPublicIpDnsNameOptions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpModifyPublicIpDnsNameOptions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ModifyPublicIpDnsNameOptionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpModifyPublicIpDnsNameOptionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpModifyReservedInstances struct { } @@ -9842,8 +9962,8 @@ func addOpCreateCustomerGatewayValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpCreateCustomerGateway{}, middleware.After) } -func addOpCreateDefaultSubnetValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateDefaultSubnet{}, middleware.After) +func addOpCreateDelegateMacVolumeOwnershipTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateDelegateMacVolumeOwnershipTask{}, middleware.After) } func addOpCreateDhcpOptionsValidationMiddleware(stack *middleware.Stack) error { @@ -9870,6 +9990,10 @@ func addOpCreateImageValidationMiddleware(stack *middleware.Stack) error { return 
stack.Initialize.Add(&validateOpCreateImage{}, middleware.After) } +func addOpCreateImageUsageReportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateImageUsageReport{}, middleware.After) +} + func addOpCreateInstanceConnectEndpointValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateInstanceConnectEndpoint{}, middleware.After) } @@ -9926,6 +10050,10 @@ func addOpCreateLocalGatewayVirtualInterfaceValidationMiddleware(stack *middlewa return stack.Initialize.Add(&validateOpCreateLocalGatewayVirtualInterface{}, middleware.After) } +func addOpCreateMacSystemIntegrityProtectionModificationTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateMacSystemIntegrityProtectionModificationTask{}, middleware.After) +} + func addOpCreateManagedPrefixListValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateManagedPrefixList{}, middleware.After) } @@ -10082,10 +10210,6 @@ func addOpCreateVerifiedAccessTrustProviderValidationMiddleware(stack *middlewar return stack.Initialize.Add(&validateOpCreateVerifiedAccessTrustProvider{}, middleware.After) } -func addOpCreateVolumeValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateVolume{}, middleware.After) -} - func addOpCreateVpcBlockPublicAccessExclusionValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateVpcBlockPublicAccessExclusion{}, middleware.After) } @@ -10158,6 +10282,10 @@ func addOpDeleteFpgaImageValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteFpgaImage{}, middleware.After) } +func addOpDeleteImageUsageReportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteImageUsageReport{}, middleware.After) +} + func addOpDeleteInstanceConnectEndpointValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteInstanceConnectEndpoint{}, middleware.After) } @@ -10498,6 +10626,10 @@ func addOpDescribeImageAttributeValidationMiddleware(stack *middleware.Stack) er return stack.Initialize.Add(&validateOpDescribeImageAttribute{}, middleware.After) } +func addOpDescribeImageReferencesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeImageReferences{}, middleware.After) +} + func addOpDescribeInstanceAttributeValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeInstanceAttribute{}, middleware.After) } @@ -10758,6 +10890,10 @@ func addOpExportVerifiedAccessInstanceClientConfigurationValidationMiddleware(st return stack.Initialize.Add(&validateOpExportVerifiedAccessInstanceClientConfiguration{}, middleware.After) } +func addOpGetActiveVpnTunnelStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetActiveVpnTunnelStatus{}, middleware.After) +} + func addOpGetAssociatedEnclaveCertificateIamRolesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetAssociatedEnclaveCertificateIamRoles{}, middleware.After) } @@ -11022,6 +11158,10 @@ func addOpModifyInstanceCapacityReservationAttributesValidationMiddleware(stack return stack.Initialize.Add(&validateOpModifyInstanceCapacityReservationAttributes{}, middleware.After) } +func 
addOpModifyInstanceConnectEndpointValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpModifyInstanceConnectEndpoint{}, middleware.After) +} + func addOpModifyInstanceCpuOptionsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpModifyInstanceCpuOptions{}, middleware.After) } @@ -11090,6 +11230,10 @@ func addOpModifyPrivateDnsNameOptionsValidationMiddleware(stack *middleware.Stac return stack.Initialize.Add(&validateOpModifyPrivateDnsNameOptions{}, middleware.After) } +func addOpModifyPublicIpDnsNameOptionsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpModifyPublicIpDnsNameOptions{}, middleware.After) +} + func addOpModifyReservedInstancesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpModifyReservedInstances{}, middleware.After) } @@ -13410,9 +13554,6 @@ func validateOpCreateClientVpnEndpointInput(v *CreateClientVpnEndpointInput) err return nil } invalidParams := smithy.InvalidParamsError{Context: "CreateClientVpnEndpointInput"} - if v.ClientCidrBlock == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientCidrBlock")) - } if v.ServerCertificateArn == nil { invalidParams.Add(smithy.NewErrParamRequired("ServerCertificateArn")) } @@ -13498,13 +13639,16 @@ func validateOpCreateCustomerGatewayInput(v *CreateCustomerGatewayInput) error { } } -func validateOpCreateDefaultSubnetInput(v *CreateDefaultSubnetInput) error { +func validateOpCreateDelegateMacVolumeOwnershipTaskInput(v *CreateDelegateMacVolumeOwnershipTaskInput) error { if v == nil { return nil } - invalidParams := smithy.InvalidParamsError{Context: "CreateDefaultSubnetInput"} - if v.AvailabilityZone == nil { - invalidParams.Add(smithy.NewErrParamRequired("AvailabilityZone")) + invalidParams := smithy.InvalidParamsError{Context: "CreateDelegateMacVolumeOwnershipTaskInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.MacCredentials == nil { + invalidParams.Add(smithy.NewErrParamRequired("MacCredentials")) } if invalidParams.Len() > 0 { return invalidParams @@ -13620,6 +13764,24 @@ func validateOpCreateImageInput(v *CreateImageInput) error { } } +func validateOpCreateImageUsageReportInput(v *CreateImageUsageReportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateImageUsageReportInput"} + if v.ImageId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageId")) + } + if v.ResourceTypes == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceTypes")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateInstanceConnectEndpointInput(v *CreateInstanceConnectEndpointInput) error { if v == nil { return nil @@ -13868,6 +14030,24 @@ func validateOpCreateLocalGatewayVirtualInterfaceInput(v *CreateLocalGatewayVirt } } +func validateOpCreateMacSystemIntegrityProtectionModificationTaskInput(v *CreateMacSystemIntegrityProtectionModificationTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateMacSystemIntegrityProtectionModificationTaskInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if len(v.MacSystemIntegrityProtectionStatus) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacSystemIntegrityProtectionStatus")) + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + func validateOpCreateManagedPrefixListInput(v *CreateManagedPrefixListInput) error { if v == nil { return nil @@ -14599,21 +14779,6 @@ func validateOpCreateVerifiedAccessTrustProviderInput(v *CreateVerifiedAccessTru } } -func validateOpCreateVolumeInput(v *CreateVolumeInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateVolumeInput"} - if v.AvailabilityZone == nil { - invalidParams.Add(smithy.NewErrParamRequired("AvailabilityZone")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - func validateOpCreateVpcBlockPublicAccessExclusionInput(v *CreateVpcBlockPublicAccessExclusionInput) error { if v == nil { return nil @@ -14902,6 +15067,21 @@ func validateOpDeleteFpgaImageInput(v *DeleteFpgaImageInput) error { } } +func validateOpDeleteImageUsageReportInput(v *DeleteImageUsageReportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteImageUsageReportInput"} + if v.ReportId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReportId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteInstanceConnectEndpointInput(v *DeleteInstanceConnectEndpointInput) error { if v == nil { return nil @@ -16210,6 +16390,21 @@ func validateOpDescribeImageAttributeInput(v *DescribeImageAttributeInput) error } } +func validateOpDescribeImageReferencesInput(v *DescribeImageReferencesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeImageReferencesInput"} + if v.ImageIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImageIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeInstanceAttributeInput(v *DescribeInstanceAttributeInput) error { if v == nil { return nil @@ -17297,6 +17492,24 @@ func validateOpExportVerifiedAccessInstanceClientConfigurationInput(v *ExportVer } } +func validateOpGetActiveVpnTunnelStatusInput(v *GetActiveVpnTunnelStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetActiveVpnTunnelStatusInput"} + if v.VpnConnectionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("VpnConnectionId")) + } + if v.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(smithy.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetAssociatedEnclaveCertificateIamRolesInput(v *GetAssociatedEnclaveCertificateIamRolesInput) error { if v == nil { return nil @@ -18105,9 +18318,6 @@ func validateOpImportVolumeInput(v *ImportVolumeInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "ImportVolumeInput"} - if v.AvailabilityZone == nil { - invalidParams.Add(smithy.NewErrParamRequired("AvailabilityZone")) - } if v.Image == nil { invalidParams.Add(smithy.NewErrParamRequired("Image")) } else if v.Image != nil { @@ -18400,6 +18610,21 @@ func validateOpModifyInstanceCapacityReservationAttributesInput(v *ModifyInstanc } } +func validateOpModifyInstanceConnectEndpointInput(v *ModifyInstanceConnectEndpointInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ModifyInstanceConnectEndpointInput"} + if v.InstanceConnectEndpointId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("InstanceConnectEndpointId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpModifyInstanceCpuOptionsInput(v *ModifyInstanceCpuOptionsInput) error { if v == nil { return nil @@ -18696,6 +18921,24 @@ func validateOpModifyPrivateDnsNameOptionsInput(v *ModifyPrivateDnsNameOptionsIn } } +func validateOpModifyPublicIpDnsNameOptionsInput(v *ModifyPublicIpDnsNameOptionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ModifyPublicIpDnsNameOptionsInput"} + if v.NetworkInterfaceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NetworkInterfaceId")) + } + if len(v.HostnameType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("HostnameType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpModifyReservedInstancesInput(v *ModifyReservedInstancesInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c81265a25..607fc0922 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.13.1 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. + +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. + +# v1.12.4 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. + # v1.12.3 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index d83e533ef..7a0b6aae2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.3" +const goModuleVersion = "1.13.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 2b5ceb4b5..cb68986ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,46 @@ +# v1.13.6 (2025-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2025-08-27) + +* **Dependency Update**: Update to smithy-go v1.23.0. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2025-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2025-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2025-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2025-07-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.15 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index a165a100f..88ba2f392 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.15" +const goModuleVersion = "1.13.6" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 84ef0dad8..8b6ab2950 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,21 @@ +# Release (2025-08-27) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.23.0 + * **Feature**: Sort map keys in JSON Document types. + +# Release (2025-07-24) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.5 + * **Feature**: Add HTTP interceptors. + # Release (2025-06-16) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 08df74589..77a74ae0c 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,19 +4,21 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.20. +The smithy-go runtime requires a minimum version of Go 1.22. **WARNING: All interfaces are subject to change.** -## Can I use the code generators? +## :no_entry_sign: DO NOT use the code generators in this repository + +**The code generators in this repository do not generate working clients at +this time.** In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. +The code generator does not currently support any protocols out of the box. Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). 
We are tracking the movement of those out of the SDK into smithy-go in @@ -31,6 +33,7 @@ This repository implements the following Smithy build plugins: |----|------------|-------------| | `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | | `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | +| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. | **NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** @@ -77,7 +80,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.20" + "goDirective": "1.22" } } } @@ -87,6 +90,10 @@ example created from `smithy init`: This plugin is a work-in-progress and is currently undocumented. +## `go-shape-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go index a93528397..f778272be 100644 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -9,7 +9,7 @@ import ( // Endpoint is the endpoint object returned by Endpoint resolution V2 type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. + // The complete URL minimally specifying the scheme and host. // May optionally specify the port and base path component. URI url.URL diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index c566c8337..945db0af3 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.4" +const goModuleVersion = "1.23.0" diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go new file mode 100644 index 000000000..e21f2632a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go @@ -0,0 +1,321 @@ +package http + +import ( + "context" +) + +func icopy[T any](v []T) []T { + s := make([]T, len(v)) + copy(s, v) + return s +} + +// InterceptorContext is all the information available in different +// interceptors. +// +// Not all information is available in each interceptor, see each interface +// definition for more details. +type InterceptorContext struct { + Input any + Request *Request + + Output any + Response *Response +} + +// InterceptorRegistry holds a list of operation interceptors. +// +// Interceptors allow callers to insert custom behavior at well-defined points +// within a client's operation lifecycle. +// +// # Interceptor context +// +// All interceptors are invoked with a context object that contains input and +// output containers for the operation. The individual fields that are +// available will depend on what the interceptor is and, in certain +// interceptors, how far the operation was able to progress. 
See the +// documentation for each interface definition for more information about field +// availability. +// +// Implementations MUST NOT directly mutate the values of the fields in the +// interceptor context. They are free to mutate the existing values _pointed +// to_ by those fields, however. +// +// # Returning errors +// +// All interceptors can return errors. If an interceptor returns an error +// _before_ the client's retry loop, the operation will fail immediately. If +// one returns an error _within_ the retry loop, the error WILL be considered +// according to the client's retry policy. +// +// # Adding interceptors +// +// Idiomatically you will simply use one of the Add() receiver methods to +// register interceptors as desired. However, the list for each interface is +// exported on the registry struct and the caller is free to manipulate it +// directly, for example, to register a number of interceptors all at once, or +// to remove one that was previously registered. +// +// The base SDK client WILL NOT add any interceptors. SDK operations and +// customizations are implemented in terms of middleware. +// +// Modifications to the registry will not persist across operation calls when +// using per-operation functional options. This means you can register +// interceptors on a per-operation basis without affecting other operations. +type InterceptorRegistry struct { + BeforeExecution []BeforeExecutionInterceptor + BeforeSerialization []BeforeSerializationInterceptor + AfterSerialization []AfterSerializationInterceptor + BeforeRetryLoop []BeforeRetryLoopInterceptor + BeforeAttempt []BeforeAttemptInterceptor + BeforeSigning []BeforeSigningInterceptor + AfterSigning []AfterSigningInterceptor + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor + BeforeDeserialization []BeforeDeserializationInterceptor + AfterDeserialization []AfterDeserializationInterceptor + AfterAttempt []AfterAttemptInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// Copy returns a deep copy of the registry. This is used by SDK clients on +// each operation call in order to prevent per-op config mutation from +// persisting. +func (i *InterceptorRegistry) Copy() InterceptorRegistry { + return InterceptorRegistry{ + BeforeExecution: icopy(i.BeforeExecution), + BeforeSerialization: icopy(i.BeforeSerialization), + AfterSerialization: icopy(i.AfterSerialization), + BeforeRetryLoop: icopy(i.BeforeRetryLoop), + BeforeAttempt: icopy(i.BeforeAttempt), + BeforeSigning: icopy(i.BeforeSigning), + AfterSigning: icopy(i.AfterSigning), + BeforeTransmit: icopy(i.BeforeTransmit), + AfterTransmit: icopy(i.AfterTransmit), + BeforeDeserialization: icopy(i.BeforeDeserialization), + AfterDeserialization: icopy(i.AfterDeserialization), + AfterAttempt: icopy(i.AfterAttempt), + AfterExecution: icopy(i.AfterExecution), + } +} + +// AddBeforeExecution registers the provided BeforeExecutionInterceptor. +func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { + i.BeforeExecution = append(i.BeforeExecution, v) +} + +// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. +func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { + i.BeforeSerialization = append(i.BeforeSerialization, v) +} + +// AddAfterSerialization registers the provided AfterSerializationInterceptor. 
+func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { + i.AfterSerialization = append(i.AfterSerialization, v) +} + +// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. +func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { + i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) +} + +// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. +func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { + i.BeforeAttempt = append(i.BeforeAttempt, v) +} + +// AddBeforeSigning registers the provided BeforeSigningInterceptor. +func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { + i.BeforeSigning = append(i.BeforeSigning, v) +} + +// AddAfterSigning registers the provided AfterSigningInterceptor. +func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { + i.AfterSigning = append(i.AfterSigning, v) +} + +// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. +func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { + i.BeforeTransmit = append(i.BeforeTransmit, v) +} + +// AddAfterTransmit registers the provided AfterTransmitInterceptor. +func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { + i.AfterTransmit = append(i.AfterTransmit, v) +} + +// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. +func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { + i.BeforeDeserialization = append(i.BeforeDeserialization, v) +} + +// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. +func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { + i.AfterDeserialization = append(i.AfterDeserialization, v) +} + +// AddAfterAttempt registers the provided AfterAttemptInterceptor. +func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { + i.AfterAttempt = append(i.AfterAttempt, v) +} + +// AddAfterExecution registers the provided AfterExecutionInterceptor. +func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { + i.AfterExecution = append(i.AfterExecution, v) +} + +// BeforeExecutionInterceptor runs before anything else in the operation +// lifecycle. +// +// Available InterceptorContext fields: +// - Input +type BeforeExecutionInterceptor interface { + BeforeExecution(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSerializationInterceptor runs before the operation input is serialized +// into its transport request. +// +// Serialization occurs before the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +type BeforeSerializationInterceptor interface { + BeforeSerialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterSerializationInterceptor runs after the operation input is serialized +// into its transport request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSerializationInterceptor interface { + AfterSerialization(ctx context.Context, in *InterceptorContext) error +} + +// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeRetryLoopInterceptor interface { + BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error +} + +// BeforeAttemptInterceptor runs right before every attempt in the retry loop. 
+// +// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be +// invoked. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeAttemptInterceptor interface { + BeforeAttempt(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSigningInterceptor runs right before the request is signed. +// +// Signing occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeSigningInterceptor interface { + BeforeSigning(ctx context.Context, in *InterceptorContext) error +} + +// AfterSigningInterceptor runs right after the request is signed. +// +// It is unsafe to modify the outgoing HTTP request at or past this hook, since +// doing so may invalidate the signature of the request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSigningInterceptor interface { + AfterSigning(ctx context.Context, in *InterceptorContext) error +} + +// BeforeTransmitInterceptor runs right before the HTTP request is sent. +// +// HTTP transmit occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeTransmitInterceptor interface { + BeforeTransmit(ctx context.Context, in *InterceptorContext) error +} + +// AfterTransmitInterceptor runs right after the HTTP response is received. +// +// It will always be invoked when a response is received, regardless of its +// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was +// not successful, e.g. because of a DNS resolution error +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type AfterTransmitInterceptor interface { + AfterTransmit(ctx context.Context, in *InterceptorContext) error +} + +// BeforeDeserializationInterceptor runs right before the incoming HTTP response +// is deserialized. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Deserialization occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type BeforeDeserializationInterceptor interface { + BeforeDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterDeserializationInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request +// - Response +type AfterDeserializationInterceptor interface { + AfterDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterAttemptInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error, or if another interceptor within the retry loop +// returned an error. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterAttemptInterceptor interface { + AfterAttempt(ctx context.Context, in *InterceptorContext) error +} + +// AfterExecutionInterceptor runs after everything else. 
It runs regardless of +// how far the operation progressed in its lifecycle, and regardless of whether +// the operation succeeded or failed. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterExecutionInterceptor interface { + AfterExecution(ctx context.Context, in *InterceptorContext) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go new file mode 100644 index 000000000..2cc4b57f8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go @@ -0,0 +1,325 @@ +package http + +import ( + "context" + "errors" + + "github.com/aws/smithy-go/middleware" +) + +type ictxKey struct{} + +func withIctx(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) +} + +func getIctx(ctx context.Context) *InterceptorContext { + return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) +} + +// InterceptExecution runs Before/AfterExecutionInterceptors. +type InterceptExecution struct { + BeforeExecution []BeforeExecutionInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// ID identifies the middleware. +func (m *InterceptExecution) ID() string { + return "InterceptExecution" +} + +// HandleInitialize runs the interceptors. +func (m *InterceptExecution) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, md middleware.Metadata, err error, +) { + ctx = withIctx(ctx) + getIctx(ctx).Input = in.Parameters + + for _, i := range m.BeforeExecution { + if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleInitialize(ctx, in) + + for _, i := range m.AfterExecution { + if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeSerialization runs BeforeSerializationInterceptors. +type InterceptBeforeSerialization struct { + Interceptors []BeforeSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSerialization) ID() string { + return "InterceptBeforeSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptBeforeSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptAfterSerialization runs AfterSerializationInterceptors. +type InterceptAfterSerialization struct { + Interceptors []AfterSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSerialization) ID() string { + return "InterceptAfterSerialization" +} + +// HandleSerialize runs the interceptors. 
+func (m *InterceptAfterSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + getIctx(ctx).Request = in.Request.(*Request) + + for _, i := range m.Interceptors { + if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. +type InterceptBeforeRetryLoop struct { + Interceptors []BeforeRetryLoopInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeRetryLoop) ID() string { + return "InterceptBeforeRetryLoop" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptBeforeSigning runs BeforeSigningInterceptors. +type InterceptBeforeSigning struct { + Interceptors []BeforeSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSigning) ID() string { + return "InterceptBeforeSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptAfterSigning runs AfterSigningInterceptors. +type InterceptAfterSigning struct { + Interceptors []AfterSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSigning) ID() string { + return "InterceptAfterSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAfterSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. +type InterceptTransmit struct { + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor +} + +// ID identifies the middleware. +func (m *InterceptTransmit) ID() string { + return "InterceptTransmit" +} + +// HandleDeserialize runs the interceptors. 
+func (m *InterceptTransmit) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeTransmit { + if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, md, err + } + + // the root of the decorated middleware guarantees this will be here + // (client.go: ClientHandler.Handle) + getIctx(ctx).Response = out.RawResponse.(*Response) + + for _, i := range m.AfterTransmit { + if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. +type InterceptBeforeDeserialization struct { + Interceptors []BeforeDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeDeserialization) ID() string { + return "InterceptBeforeDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptBeforeDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + for _, i := range m.Interceptors { + if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAfterDeserialization runs AfterDeserializationInterceptors. +type InterceptAfterDeserialization struct { + Interceptors []AfterDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterDeserialization) ID() string { + return "InterceptAfterDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptAfterDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + getIctx(ctx).Output = out.Result + + for _, i := range m.Interceptors { + if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAttempt runs AfterAttemptInterceptors. +type InterceptAttempt struct { + BeforeAttempt []BeforeAttemptInterceptor + AfterAttempt []AfterAttemptInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAttempt) ID() string { + return "InterceptAttempt" +} + +// HandleFinalize runs the interceptors. 
+func (m *InterceptAttempt) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeAttempt { + if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleFinalize(ctx, in) + + for _, i := range m.AfterAttempt { + if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} diff --git a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml index bde0ae54e..bc79b8396 100644 --- a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml +++ b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -39,36 +39,28 @@ linters: enable-all: true disable: - cyclop - - deadcode - depguard - dupl - dupword - - exhaustivestruct - exhaustruct + - exportloopref - forbidigo - funlen - gci - gocognit - gocyclo - godox - - golint - - gomnd - - ifshort - - interfacer + - mnd - lll - maintidx - - maligned - nakedret - nestif - nlreturn - - nosnakecase - paralleltest - prealloc - rowserrcheck - - scopelint - - structcheck - testpackage - - varcheck + - tparallel - varnamelen - wastedassign @@ -76,5 +68,4 @@ issues: exclude-use-default: true max-issues-per-linter: 0 max-same-issues: 0 - # vim: set sw=2 ts=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/vendor/github.com/bombsimon/wsl/v4/analyzer.go index 46d5019a7..e51df89c6 100644 --- a/vendor/github.com/bombsimon/wsl/v4/analyzer.go +++ b/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -3,6 +3,7 @@ package wsl import ( "flag" "go/ast" + "go/token" "strings" "golang.org/x/tools/go/analysis" @@ -78,12 +79,17 @@ func (wa *wslAnalyzer) flags() flag.FlagSet { func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { for _, file := range pass.Files { - if !wa.config.IncludeGenerated && ast.IsGenerated(file) { + filename := getFilename(pass.Fset, file) + if !strings.HasSuffix(filename, ".go") { continue } - filename := pass.Fset.PositionFor(file.Pos(), false).Filename - if !strings.HasSuffix(filename, ".go") { + // if the file is related to cgo the filename of the unadjusted position is a not a '.go' file. + fn := pass.Fset.PositionFor(file.Pos(), false).Filename + + // The file is skipped if the "unadjusted" file is a Go file, and it's a generated file (ex: "_test.go" file). + // The other non-Go files are skipped by the first 'if' with the adjusted position. + if !wa.config.IncludeGenerated && ast.IsGenerated(file) && strings.HasSuffix(fn, ".go") { continue } @@ -127,7 +133,7 @@ type multiStringValue struct { // Set implements the flag.Value interface and will overwrite the pointer to the // slice with a new pointer after splitting the flag by comma. 
func (m *multiStringValue) Set(value string) error { - s := []string{} + var s []string for _, v := range strings.Split(value, ",") { s = append(s, strings.TrimSpace(v)) @@ -146,3 +152,12 @@ func (m *multiStringValue) String() string { return strings.Join(*m.slicePtr, ", ") } + +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename + } + + return filename +} diff --git a/vendor/github.com/bombsimon/wsl/v4/wsl.go b/vendor/github.com/bombsimon/wsl/v4/wsl.go index 76f4abf61..44c7abe21 100644 --- a/vendor/github.com/bombsimon/wsl/v4/wsl.go +++ b/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -353,7 +353,7 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { return false } - for j := 0; j < n; j++ { + for j := range n { s1 := statements[i+j] s2 := statements[i+j+1] @@ -1113,8 +1113,8 @@ func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne return } - blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line - blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line + blockStartLine = p.fileSet.Position(blockStartPos).Line + blockEndLine = p.fileSet.Position(blockEndPos).Line // No whitespace possible if LBrace and RBrace is on the same line. if blockStartLine == blockEndLine { @@ -1362,14 +1362,14 @@ func isExampleFunc(ident *ast.Ident) bool { } func (p *processor) nodeStart(node ast.Node) int { - return p.fileSet.PositionFor(node.Pos(), false).Line + return p.fileSet.Position(node.Pos()).Line } func (p *processor) nodeEnd(node ast.Node) int { - line := p.fileSet.PositionFor(node.End(), false).Line + line := p.fileSet.Position(node.End()).Line if isEmptyLabeledStmt(node) { - return p.fileSet.PositionFor(node.Pos(), false).Line + return p.fileSet.Position(node.Pos()).Line } return line @@ -1408,7 +1408,7 @@ func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) } func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { - position := p.fileSet.PositionFor(pos, false) + position := p.fileSet.Position(pos) p.warnings = append(p.warnings, fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), diff --git a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go index f68170fb3..ebf2a0dbe 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -8,12 +8,12 @@ import ( "strings" "sync" - "github.com/butuzov/ireturn/analyzer/internal/config" - "github.com/butuzov/ireturn/analyzer/internal/types" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" + + "github.com/butuzov/ireturn/analyzer/internal/config" + "github.com/butuzov/ireturn/analyzer/internal/types" ) const name string = "ireturn" // linter name @@ -23,11 +23,11 @@ type validator interface { } type analyzer struct { - once sync.Once - mu sync.RWMutex - handler validator - err error - diabledNolint bool + once sync.Once + mu sync.RWMutex + handler validator + err error + disabledNolint bool found []analysis.Diagnostic } @@ -63,7 +63,7 @@ func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { } // 003. Is it allowed to be checked? 
- if !a.diabledNolint && hasDisallowDirective(f.Doc) { + if !a.disabledNolint && hasDisallowDirective(f.Doc) { return } @@ -115,7 +115,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { // First: checking nonolint directive val := fs.Lookup("nonolint") if val != nil { - a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" + a.disabledNolint = fs.Lookup("nonolint").Value.String() == "true" } // Second: validators implementation next @@ -128,7 +128,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { } func NewAnalyzer() *analysis.Analyzer { - a := analyzer{} //nolint: exhaustivestruct + a := analyzer{} return &analysis.Analyzer{ Name: name, @@ -196,7 +196,7 @@ func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{} typeParams := val.String() prefix, suffix := "interface{", "}" - if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple + if strings.HasPrefix(typeParams, prefix) { //nolint:gosimple typeParams = typeParams[len(prefix):] } if strings.HasSuffix(typeParams, suffix) { diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go index 6a294ca35..da101c786 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -2,7 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) +// allowConfig specifies a list of interfaces (keywords, patterns and regular expressions) // that are allowed by ireturn as valid to return, any non listed interface are rejected. type allowConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go index 6aa04e52e..d6914af86 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go @@ -10,7 +10,6 @@ import ( var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time") -// nolint: exhaustivestruct func DefaultValidatorConfig() *allowConfig { return allowAll([]string{ types.NameEmpty, // "empty": empty interfaces (interface{}) diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go index bef6913bb..b2cde910c 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go @@ -2,7 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions) +// rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions) // that are rejected by ireturn as valid to return, any non listed interface are allowed. 
type rejectConfig struct { *defaultConfig diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go index 5e576374d..0f4286515 100644 --- a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go @@ -47,7 +47,7 @@ func (i IFace) HashString() string { } func (i IFace) ExportDiagnostic() analysis.Diagnostic { - return analysis.Diagnostic{ //nolint: exhaustivestruct + return analysis.Diagnostic{ Pos: i.Pos, Message: i.String(), } diff --git a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md index 3dcc01e96..da30c8e00 100644 --- a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md +++ b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md @@ -1,201 +1,55 @@ - -func (*bufio.Writer) Write([]byte) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bufio.Writer) WriteRune(rune) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bytes.Buffer) Write([]byte) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func (*bytes.Buffer) WriteRune(rune) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func bytes.Compare([]byte, []byte) int -func strings.Compare(string, string) int - - -func bytes.Contains([]byte, []byte) bool -func strings.Contains(string, string) bool - - -func bytes.ContainsAny([]byte, string) bool -func strings.ContainsAny(string, string) bool - - -func bytes.ContainsRune([]byte, byte) bool -func strings.ContainsRune(string, byte) bool - - -func bytes.Count([]byte, []byte) int -func strings.Count(string, string) int - - -func bytes.EqualFold([]byte, []byte) bool -func strings.EqualFold(string, string) bool - - -func bytes.HasPrefix([]byte, []byte) bool -func strings.HasPrefix(string, string) bool - - -func bytes.HasSuffix([]byte, []byte) bool -func strings.HasSuffix(string, string) bool - - -func bytes.Index([]byte, []byte) int -func strings.Index(string, string) int - - -func bytes.IndexAny([]byte, string) int -func strings.IndexAny(string, string) int - - -func bytes.IndexByte([]byte, byte) int -func strings.IndexByte(string, byte) int - - -func bytes.IndexFunc([]byte, func(rune) bool) int -func strings.IndexFunc(string, func(rune) bool) int - - -func bytes.IndexRune([]byte, rune) int -func strings.IndexRune(string, rune) int - - -func bytes.LastIndex([]byte, []byte) int -func strings.LastIndex(string, string) int - - -func bytes.LastIndexAny([]byte, string) int -func strings.LastIndexAny(string, string) int - - -func bytes.LastIndexByte([]byte, byte) int -func strings.LastIndexByte(string, byte) int - - -func bytes.LastIndexFunc([]byte, func(rune) bool) int -func strings.LastIndexFunc(string, func(rune) bool) int - - -func bytes.NewBuffer([]byte) *bytes.Buffer -func bytes.NewBufferString(string) *bytes.Buffer - - -func (*httptest.ResponseRecorder) Write([]byte) (int, error) -func (*httptest.ResponseRecorder) WriteString(string) (int, error) - - -func (*maphash.Hash) Write([]byte) (int, error) -func (*maphash.Hash) WriteString(string) (int, error) - - -func (*os.File) Write([]byte) (int, error) -func (*os.File) WriteString(string) (int, error) - - -func regexp.Match(string, []byte) (bool, error) -func regexp.MatchString(string, string) (bool, error) - - -func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int -func (*regexp.Regexp) FindAllStringIndex(string, int) 
[][]int - - -func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int -func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int - - -func (*regexp.Regexp) FindIndex([]byte) []int -func (*regexp.Regexp) FindStringIndex(string) []int - - -func (*regexp.Regexp) FindSubmatchIndex([]byte) []int -func (*regexp.Regexp) FindStringSubmatchIndex(string) []int - - -func (*regexp.Regexp) Match([]byte) bool -func (*regexp.Regexp) MatchString(string) bool - - -func (*strings.Builder) Write([]byte) (int, error) -func (*strings.Builder) WriteString(string) (int, error) - - -func (*strings.Builder) WriteRune(rune) (int, error) -func (*strings.Builder) WriteString(string) (int, error) - - -func strings.Compare(string) int -func bytes.Compare([]byte) int - - -func strings.Contains(string) bool -func bytes.Contains([]byte) bool - - -func strings.ContainsAny(string) bool -func bytes.ContainsAny([]byte) bool - - -func strings.ContainsRune(string) bool -func bytes.ContainsRune([]byte) bool - - -func strings.EqualFold(string) bool -func bytes.EqualFold([]byte) bool - - -func strings.HasPrefix(string) bool -func bytes.HasPrefix([]byte) bool - - -func strings.HasSuffix(string) bool -func bytes.HasSuffix([]byte) bool - - -func strings.Index(string) int -func bytes.Index([]byte) int - - -func strings.IndexFunc(string, func(r rune) bool) int -func bytes.IndexFunc([]byte, func(r rune) bool) int - - -func strings.LastIndex(string) int -func bytes.LastIndex([]byte) int - - -func strings.LastIndexAny(string) int -func bytes.LastIndexAny([]byte) int - - -func strings.LastIndexFunc(string, func(r rune) bool) int -func bytes.LastIndexFunc([]byte, func(r rune) bool) int - - -func utf8.DecodeLastRune([]byte) (rune, int) -func utf8.DecodeLastRuneInString(string) (rune, int) - - -func utf8.DecodeRune([]byte) (rune, int) -func utf8.DecodeRuneInString(string) (rune, int) - - -func utf8.FullRune([]byte) bool -func utf8.FullRuneInString(string) bool - - -func utf8.RuneCount([]byte) int -func utf8.RuneCountInString(string) int - - -func utf8.Valid([]byte) bool -func utf8.ValidString(string) bool - + +| Function | Mirror | +|----------|--------| +| `func (*bufio.Writer) Write([]byte) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` | +| `func (*bufio.Writer) WriteRune(rune) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` | +| `func (*bytes.Buffer) Write([]byte) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` | +| `func (*bytes.Buffer) WriteRune(rune) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` | +| `func bytes.Compare([]byte, []byte) int` | `func strings.Compare(string, string) int` | +| `func bytes.Contains([]byte, []byte) bool` | `func strings.Contains(string, string) bool` | +| `func bytes.ContainsAny([]byte, string) bool` | `func strings.ContainsAny(string, string) bool` | +| `func bytes.ContainsRune([]byte, byte) bool` | `func strings.ContainsRune(string, byte) bool` | +| `func bytes.Count([]byte, []byte) int` | `func strings.Count(string, string) int` | +| `func bytes.EqualFold([]byte, []byte) bool` | `func strings.EqualFold(string, string) bool` | +| `func bytes.HasPrefix([]byte, []byte) bool` | `func strings.HasPrefix(string, string) bool` | +| `func bytes.HasSuffix([]byte, []byte) bool` | `func strings.HasSuffix(string, string) bool` | +| `func bytes.Index([]byte, []byte) int` | `func strings.Index(string, string) int` | +| `func bytes.IndexAny([]byte, string) int` | `func strings.IndexAny(string, string) 
int` | +| `func bytes.IndexByte([]byte, byte) int` | `func strings.IndexByte(string, byte) int` | +| `func bytes.IndexFunc([]byte, func(rune) bool) int` | `func strings.IndexFunc(string, func(rune) bool) int` | +| `func bytes.IndexRune([]byte, rune) int` | `func strings.IndexRune(string, rune) int` | +| `func bytes.LastIndex([]byte, []byte) int` | `func strings.LastIndex(string, string) int` | +| `func bytes.LastIndexAny([]byte, string) int` | `func strings.LastIndexAny(string, string) int` | +| `func bytes.LastIndexByte([]byte, byte) int` | `func strings.LastIndexByte(string, byte) int` | +| `func bytes.LastIndexFunc([]byte, func(rune) bool) int` | `func strings.LastIndexFunc(string, func(rune) bool) int` | +| `func bytes.NewBuffer([]byte) *bytes.Buffer` | `func bytes.NewBufferString(string) *bytes.Buffer` | +| `func (*httptest.ResponseRecorder) Write([]byte) (int, error)` | `func (*httptest.ResponseRecorder) WriteString(string) (int, error)` | +| `func maphash.Bytes([]byte) uint64` | `func maphash.String(string) uint64` | +| `func (*maphash.Hash) Write([]byte) (int, error)` | `func (*maphash.Hash) WriteString(string) (int, error)` | +| `func (*os.File) Write([]byte) (int, error)` | `func (*os.File) WriteString(string) (int, error)` | +| `func regexp.Match(string, []byte) (bool, error)` | `func regexp.MatchString(string, string) (bool, error)` | +| `func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int` | +| `func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int` | +| `func (*regexp.Regexp) FindIndex([]byte) []int` | `func (*regexp.Regexp) FindStringIndex(string) []int` | +| `func (*regexp.Regexp) FindSubmatchIndex([]byte) []int` | `func (*regexp.Regexp) FindStringSubmatchIndex(string) []int` | +| `func (*regexp.Regexp) Match([]byte) bool` | `func (*regexp.Regexp) MatchString(string) bool` | +| `func (*strings.Builder) Write([]byte) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` | +| `func (*strings.Builder) WriteRune(rune) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` | +| `func strings.Compare(string) int` | `func bytes.Compare([]byte) int` | +| `func strings.Contains(string) bool` | `func bytes.Contains([]byte) bool` | +| `func strings.ContainsAny(string) bool` | `func bytes.ContainsAny([]byte) bool` | +| `func strings.ContainsRune(string) bool` | `func bytes.ContainsRune([]byte) bool` | +| `func strings.EqualFold(string) bool` | `func bytes.EqualFold([]byte) bool` | +| `func strings.HasPrefix(string) bool` | `func bytes.HasPrefix([]byte) bool` | +| `func strings.HasSuffix(string) bool` | `func bytes.HasSuffix([]byte) bool` | +| `func strings.Index(string) int` | `func bytes.Index([]byte) int` | +| `func strings.IndexFunc(string, func(r rune) bool) int` | `func bytes.IndexFunc([]byte, func(r rune) bool) int` | +| `func strings.LastIndex(string) int` | `func bytes.LastIndex([]byte) int` | +| `func strings.LastIndexAny(string) int` | `func bytes.LastIndexAny([]byte) int` | +| `func strings.LastIndexFunc(string, func(r rune) bool) int` | `func bytes.LastIndexFunc([]byte, func(r rune) bool) int` | +| `func utf8.DecodeLastRune([]byte) (rune, int)` | `func utf8.DecodeLastRuneInString(string) (rune, int)` | +| `func utf8.DecodeRune([]byte) (rune, int)` | `func utf8.DecodeRuneInString(string) (rune, int)` | +| `func utf8.FullRune([]byte) bool` | `func utf8.FullRuneInString(string) bool` | 
+| `func utf8.RuneCount([]byte) int` | `func utf8.RuneCountInString(string) int` | +| `func utf8.Valid([]byte) bool` | `func utf8.ValidString(string) bool` | diff --git a/vendor/github.com/butuzov/mirror/Makefile b/vendor/github.com/butuzov/mirror/Makefile index ac267208f..dab6f160a 100644 --- a/vendor/github.com/butuzov/mirror/Makefile +++ b/vendor/github.com/butuzov/mirror/Makefile @@ -10,7 +10,8 @@ endef # Generate Artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ generate: ## Generate Assets - $(MAKE) + $(MAKE) generate-tests + $(MAKE) generate-mirror-table generate-tests: ## Generates Assets at testdata go run ./cmd/internal/tests/ "$(PWD)/testdata" @@ -52,7 +53,7 @@ tests-summary: bin/tparse lints: ## Run golangci-lint lints: bin/golangci-lint lints: - golangci-lint run --no-config ./... --skip-dirs "^(cmd|testdata)" + golangci-lint run --no-config ./... --exclude-dirs "^(cmd|testdata)" cover: ## Run Coverage @@ -71,8 +72,8 @@ bin/tparse: INSTALL_URL=github.com/mfridman/tparse@v0.13.2 bin/tparse: $(call install_go_bin, tparse, $(INSTALL_URL)) -bin/golangci-lint: ## Installs golangci-lint@v1.55.2 (if not exists) -bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.55.2 +bin/golangci-lint: ## Installs golangci-lint@v1.62.0 (if not exists) +bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.62.0 bin/golangci-lint: $(call install_go_bin, golangci-lint, $(INSTALL_URL)) @@ -99,7 +100,7 @@ help: dep-gawk @ echo "" -# Helper Mehtods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Helper Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ dep-gawk: @ if [ -z "$(shell command -v gawk)" ]; then \ if [ -x /usr/local/bin/brew ]; then $(MAKE) _brew_gawk_install; exit 0; fi; \ @@ -111,21 +112,21 @@ dep-gawk: fi _brew_gawk_install: - @ echo "Instaling gawk using brew... " + @ echo "Installing gawk using brew... " @ brew install gawk --quiet @ echo "done" _ubuntu_gawk_install: - @ echo "Instaling gawk using apt-get... " + @ echo "Installing gawk using apt-get... " @ apt-get -q install gawk -y @ echo "done" _alpine_gawk_install: - @ echo "Instaling gawk using yum... " + @ echo "Installing gawk using yum... " @ apk add --update --no-cache gawk @ echo "done" _centos_gawk_install: - @ echo "Instaling gawk using yum... " + @ echo "Installing gawk using yum... 
" @ yum install -q -y gawk; @ echo "done" diff --git a/vendor/github.com/butuzov/mirror/analyzer.go b/vendor/github.com/butuzov/mirror/analyzer.go index 13ded46c6..b15019ce1 100644 --- a/vendor/github.com/butuzov/mirror/analyzer.go +++ b/vendor/github.com/butuzov/mirror/analyzer.go @@ -44,9 +44,9 @@ func Run(pass *analysis.Pass, withTests bool) []*checker.Violation { BytesFunctions, BytesBufferMethods, RegexpFunctions, RegexpRegexpMethods, StringFunctions, StringsBuilderMethods, + MaphashMethods, MaphashFunctions, BufioMethods, HTTPTestMethods, - OsFileMethods, MaphashMethods, - UTF8Functions, + OsFileMethods, UTF8Functions, ) check.Type = checker.WrapType(pass.TypesInfo) diff --git a/vendor/github.com/butuzov/mirror/checkers_maphash.go b/vendor/github.com/butuzov/mirror/checkers_maphash.go index 0aa43ff7b..345a64123 100644 --- a/vendor/github.com/butuzov/mirror/checkers_maphash.go +++ b/vendor/github.com/butuzov/mirror/checkers_maphash.go @@ -2,35 +2,66 @@ package mirror import "github.com/butuzov/mirror/internal/checker" -var MaphashMethods = []checker.Violation{ - { // (*hash/maphash).Write - Targets: checker.Bytes, - Type: checker.Method, - Package: "hash/maphash", - Struct: "Hash", - Caller: "Write", - Args: []int{0}, - AltCaller: "WriteString", +var ( + MaphashFunctions = []checker.Violation{ + { // maphash.Bytes + Targets: checker.Bytes, + Type: checker.Function, + Package: "hash/maphash", + Caller: "Bytes", + Args: []int{1}, + AltCaller: "String", - Generate: &checker.Generate{ - PreCondition: `h := maphash.Hash{}`, - Pattern: `Write($0)`, - Returns: []string{"int", "error"}, + Generate: &checker.Generate{ + Pattern: `Bytes(maphash.MakeSeed(), $0)`, + Returns: []string{"uint64"}, + }, }, - }, - { // (*hash/maphash).WriteString - Targets: checker.Strings, - Type: checker.Method, - Package: "hash/maphash", - Struct: "Hash", - Caller: "WriteString", - Args: []int{0}, - AltCaller: "Write", + { // maphash.String + Targets: checker.Strings, + Type: checker.Function, + Package: "hash/maphash", + Caller: "String", + Args: []int{1}, + AltCaller: "Bytes", - Generate: &checker.Generate{ - PreCondition: `h := maphash.Hash{}`, - Pattern: `WriteString($0)`, - Returns: []string{"int", "error"}, + Generate: &checker.Generate{ + Pattern: `String(maphash.MakeSeed(), $0)`, + Returns: []string{"uint64"}, + }, }, - }, -} + } + + MaphashMethods = []checker.Violation{ + { // (*hash/maphash).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `Write($0)`, + Returns: []string{"int", "error"}, + }, + }, + { // (*hash/maphash).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `WriteString($0)`, + Returns: []string{"int", "error"}, + }, + }, + } +) diff --git a/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/vendor/github.com/butuzov/mirror/internal/checker/checker.go index c1a941631..fb9ba4172 100644 --- a/vendor/github.com/butuzov/mirror/internal/checker/checker.go +++ b/vendor/github.com/butuzov/mirror/internal/checker/checker.go @@ -9,12 +9,12 @@ import ( "strings" ) -// Checker will perform standart check on package and its methods. 
+// Checker will perform standard check on package and its methods. type Checker struct { Violations []Violation // List of available violations Packages map[string][]int // Storing indexes of Violations per pkg/kg.Struct Type func(ast.Expr) string // Type Checker closure. - Print func(ast.Node) []byte // String representation of the expresion. + Print func(ast.Node) []byte // String representation of the expression. } func New(violations ...[]Violation) Checker { @@ -76,7 +76,7 @@ func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool continue } - // is it convertsion call + // is it conversion call if !c.callConverts(call) { continue } diff --git a/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/vendor/github.com/butuzov/mirror/internal/checker/violation.go index 3d8acf141..c2c149208 100644 --- a/vendor/github.com/butuzov/mirror/internal/checker/violation.go +++ b/vendor/github.com/butuzov/mirror/internal/checker/violation.go @@ -28,7 +28,7 @@ const ( UntypedRune string = "untyped rune" ) -// Violation describs what message we going to give to a particular code violation +// Violation describes what message we going to give to a particular code violation type Violation struct { Type ViolationType // Args []int // Indexes of the arguments needs to be checked @@ -143,7 +143,7 @@ func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic { v.AltPackage = v.Package } - // Hooray! we dont need to change package and redo imports. + // Hooray! we don't need to change package and redo imports. if v.Type == Function && v.AltPackage == v.Package && noNl { diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ Message: "Fix Issue With", @@ -166,7 +166,7 @@ type GolangIssue struct { Original string } -// Issue intended to be used only within `golangci-lint`, bu you can use use it +// Issue intended to be used only within `golangci-lint`, but you can use it // alongside Diagnostic if you wish. func (v *Violation) Issue(fSet *token.FileSet) GolangIssue { issue := GolangIssue{ diff --git a/vendor/github.com/butuzov/mirror/readme.md b/vendor/github.com/butuzov/mirror/readme.md index f830ea72e..f5cfa47a6 100644 --- a/vendor/github.com/butuzov/mirror/readme.md +++ b/vendor/github.com/butuzov/mirror/readme.md @@ -2,6 +2,13 @@ `mirror` suggests use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) list of mirror functions you can use in go's stdlib. +--- + +[![United 24](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner-personal-page.svg)](https://u24.gov.ua/) +[![Help Oleg Butuzov](https://raw.githubusercontent.com/butuzov/butuzov/main/personal.svg)](https://github.com/butuzov) + +--- + ## Linter Use Cases ### `github.com/argoproj/argo-cd` @@ -86,13 +93,13 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi - flag `--tests` (e.g. `--tests=false`) - flag `--skip-files` (e.g. 
`--skip-files="_test.go"`) - - yaml confguration `run.skip-files`: + - yaml configuration `run.skip-files`: ```yaml run: skip-files: - '(.+)_test\.go' ``` - - yaml confguration `issues.exclude-rules`: + - yaml configuration `issues.exclude-rules`: ```yaml issues: exclude-rules: @@ -106,7 +113,7 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi ```shell # Update Assets (testdata/(strings|bytes|os|utf8|maphash|regexp|bufio).go) -(task|make) generated +(task|make) generate # Run Tests (task|make) tests # Lint Code diff --git a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go index 543b4bdbc..866d11083 100644 --- a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go +++ b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go @@ -16,23 +16,41 @@ import ( "golang.org/x/tools/go/analysis" ) +type optionInt struct { + enabled bool + intConv bool +} + +type optionErr struct { + enabled bool + errError bool + errorf bool +} + +type optionStr struct { + enabled bool + sprintf1 bool + strconcat bool +} + type perfSprint struct { - intConv bool - errError bool - errorf bool - sprintf1 bool + intFormat optionInt + errFormat optionErr + strFormat optionStr + + boolFormat bool + hexFormat bool fiximports bool - strconcat bool } func newPerfSprint() *perfSprint { return &perfSprint{ - intConv: true, - errError: false, - errorf: true, - sprintf1: true, + intFormat: optionInt{enabled: true, intConv: true}, + errFormat: optionErr{enabled: true, errError: false, errorf: true}, + strFormat: optionStr{enabled: true, sprintf1: true, strconcat: true}, + boolFormat: true, + hexFormat: true, fiximports: true, - strconcat: true, } } @@ -40,27 +58,32 @@ func New() *analysis.Analyzer { n := newPerfSprint() r := &analysis.Analyzer{ Name: "perfsprint", + URL: "https://github.com/catenacyber/perfsprint", Doc: "Checks that fmt.Sprintf can be replaced with a faster alternative.", Run: n.run, Requires: []*analysis.Analyzer{inspect.Analyzer}, } - r.Flags.BoolVar(&n.intConv, "int-conversion", true, "optimizes even if it requires an int or uint type cast") - r.Flags.BoolVar(&n.errError, "err-error", false, "optimizes into err.Error() even if it is only equivalent for non-nil errors") - r.Flags.BoolVar(&n.errorf, "errorf", true, "optimizes fmt.Errorf") - r.Flags.BoolVar(&n.sprintf1, "sprintf1", true, "optimizes fmt.Sprintf with only one argument") - r.Flags.BoolVar(&n.fiximports, "fiximports", true, "fix needed imports from other fixes") - r.Flags.BoolVar(&n.strconcat, "strconcat", true, "optimizes into strings concatenation") + r.Flags.BoolVar(&n.intFormat.enabled, "integer-format", n.intFormat.enabled, "enable/disable optimization of integer formatting") + r.Flags.BoolVar(&n.intFormat.intConv, "int-conversion", n.intFormat.intConv, "optimizes even if it requires an int or uint type cast") + r.Flags.BoolVar(&n.errFormat.enabled, "error-format", n.errFormat.enabled, "enable/disable optimization of error formatting") + r.Flags.BoolVar(&n.errFormat.errError, "err-error", n.errFormat.errError, "optimizes into err.Error() even if it is only equivalent for non-nil errors") + r.Flags.BoolVar(&n.errFormat.errorf, "errorf", n.errFormat.errorf, "optimizes fmt.Errorf") + r.Flags.BoolVar(&n.boolFormat, "bool-format", n.boolFormat, "enable/disable optimization of bool formatting") + r.Flags.BoolVar(&n.hexFormat, "hex-format", n.hexFormat, "enable/disable optimization of hex formatting") + 
r.Flags.BoolVar(&n.strFormat.enabled, "string-format", n.strFormat.enabled, "enable/disable optimization of string formatting") + r.Flags.BoolVar(&n.strFormat.sprintf1, "sprintf1", n.strFormat.sprintf1, "optimizes fmt.Sprintf with only one argument") + r.Flags.BoolVar(&n.strFormat.strconcat, "strconcat", n.strFormat.strconcat, "optimizes into strings concatenation") + r.Flags.BoolVar(&n.fiximports, "fiximports", n.fiximports, "fix needed imports from other fixes") + return r } // true if verb is a format string that could be replaced with concatenation. func isConcatable(verb string) bool { - hasPrefix := - (strings.HasPrefix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || - (strings.HasPrefix(verb, "%[1]s") && !strings.Contains(verb, "%s")) - hasSuffix := - (strings.HasSuffix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || - (strings.HasSuffix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + hasPrefix := (strings.HasPrefix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasPrefix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + hasSuffix := (strings.HasSuffix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasSuffix(verb, "%[1]s") && !strings.Contains(verb, "%s")) if strings.Count(verb, "%[1]s") > 1 { return false @@ -69,6 +92,18 @@ func isConcatable(verb string) bool { } func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { + if !n.intFormat.enabled { + n.intFormat.intConv = false + } + if !n.errFormat.enabled { + n.errFormat.errError = false + n.errFormat.errorf = false + } + if !n.strFormat.enabled { + n.strFormat.sprintf1 = false + n.strFormat.strconcat = false + } + var fmtSprintObj, fmtSprintfObj, fmtErrorfObj types.Object for _, pkg := range pass.Pkg.Imports() { if pkg.Path() == "fmt" { @@ -81,7 +116,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { return nil, nil } removedFmtUsages := make(map[string]int) - neededPackages := make(map[string]map[string]bool) + neededPackages := make(map[string]map[string]struct{}) insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -102,28 +137,20 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { err error ) switch { - case calledObj == fmtErrorfObj && len(call.Args) == 1: - if n.errorf { - fn = "fmt.Errorf" - verb = "%s" - value = call.Args[0] - } else { - return - } + case calledObj == fmtErrorfObj && len(call.Args) == 1 && n.errFormat.errorf: + fn = "fmt.Errorf" + verb = "%s" + value = call.Args[0] case calledObj == fmtSprintObj && len(call.Args) == 1: fn = "fmt.Sprint" verb = "%v" value = call.Args[0] - case calledObj == fmtSprintfObj && len(call.Args) == 1: - if n.sprintf1 { - fn = "fmt.Sprintf" - verb = "%s" - value = call.Args[0] - } else { - return - } + case calledObj == fmtSprintfObj && len(call.Args) == 1 && n.strFormat.sprintf1: + fn = "fmt.Sprintf" + verb = "%s" + value = call.Args[0] case calledObj == fmtSprintfObj && len(call.Args) == 2: verbLit, ok := call.Args[0].(*ast.BasicLit) @@ -149,7 +176,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { switch verb { default: - if fn == "fmt.Sprintf" && isConcatable(verb) && n.strconcat { + if fn == "fmt.Sprintf" && isConcatable(verb) && n.strFormat.strconcat { break } return @@ -164,18 +191,17 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { switch { case isBasicType(valueType, types.String) && oneOf(verb, "%v", "%s"): fname := pass.Fset.File(call.Pos()).Name() - _, ok := neededPackages[fname] - if !ok { 
- neededPackages[fname] = make(map[string]bool) + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) } removedFmtUsages[fname]++ - if fn == "fmt.Errorf" { - neededPackages[fname]["errors"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with errors.New", - SuggestedFixes: []analysis.SuggestedFix{ + if fn == "fmt.Errorf" && n.errFormat.enabled { + neededPackages[fname]["errors"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with errors.New", + []analysis.SuggestedFix{ { Message: "Use errors.New", TextEdits: []analysis.TextEdit{{ @@ -185,13 +211,13 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } - } else { - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with just using the string", - SuggestedFixes: []analysis.SuggestedFix{ + ) + } else if fn != "fmt.Errorf" && n.strFormat.enabled { + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with just using the string", + []analysis.SuggestedFix{ { Message: "Just use string value", TextEdits: []analysis.TextEdit{{ @@ -201,19 +227,19 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } + ) } - case types.Implements(valueType, errIface) && oneOf(verb, "%v", "%s") && n.errError: + case types.Implements(valueType, errIface) && oneOf(verb, "%v", "%s") && n.errFormat.errError: // known false positive if this error is nil // fmt.Sprint(nil) does not panic like nil.Error() does errMethodCall := formatNode(pass.Fset, value) + ".Error()" fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with " + errMethodCall, - SuggestedFixes: []analysis.SuggestedFix{ + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with "+errMethodCall, + []analysis.SuggestedFix{ { Message: "Use " + errMethodCall, TextEdits: []analysis.TextEdit{{ @@ -223,21 +249,20 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } + ) - case isBasicType(valueType, types.Bool) && oneOf(verb, "%v", "%t"): + case isBasicType(valueType, types.Bool) && oneOf(verb, "%v", "%t") && n.boolFormat: fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster strconv.FormatBool", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster strconv.FormatBool", + []analysis.SuggestedFix{ { Message: "Use strconv.FormatBool", TextEdits: []analysis.TextEdit{{ @@ -247,9 +272,9 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } + ) - case isArray && isBasicType(a.Elem(), types.Uint8) && oneOf(verb, "%x"): + case isArray && isBasicType(a.Elem(), types.Uint8) && oneOf(verb, "%x") && n.hexFormat: if _, ok := value.(*ast.Ident); !ok { // Doesn't support array literals. 
return @@ -257,16 +282,15 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["encoding/hex"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster hex.EncodeToString", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["encoding/hex"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster hex.EncodeToString", + []analysis.SuggestedFix{ { Message: "Use hex.EncodeToString", TextEdits: []analysis.TextEdit{ @@ -283,20 +307,19 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }, }, }, - } - case isSlice && isBasicType(s.Elem(), types.Uint8) && oneOf(verb, "%x"): + ) + case isSlice && isBasicType(s.Elem(), types.Uint8) && oneOf(verb, "%x") && n.hexFormat: fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["encoding/hex"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster hex.EncodeToString", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["encoding/hex"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster hex.EncodeToString", + []analysis.SuggestedFix{ { Message: "Use hex.EncodeToString", TextEdits: []analysis.TextEdit{{ @@ -306,21 +329,20 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } + ) - case isBasicType(valueType, types.Int8, types.Int16, types.Int32) && oneOf(verb, "%v", "%d") && n.intConv: + case isBasicType(valueType, types.Int8, types.Int16, types.Int32) && oneOf(verb, "%v", "%d") && n.intFormat.intConv: fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster strconv.Itoa", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster strconv.Itoa", + []analysis.SuggestedFix{ { Message: "Use strconv.Itoa", TextEdits: []analysis.TextEdit{ @@ -337,20 +359,19 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }, }, }, - } - case isBasicType(valueType, types.Int) && oneOf(verb, "%v", "%d"): + ) + case isBasicType(valueType, types.Int) && oneOf(verb, "%v", "%d") && n.intFormat.enabled: fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster 
strconv.Itoa", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster strconv.Itoa", + []analysis.SuggestedFix{ { Message: "Use strconv.Itoa", TextEdits: []analysis.TextEdit{{ @@ -360,20 +381,19 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } - case isBasicType(valueType, types.Int64) && oneOf(verb, "%v", "%d"): + ) + case isBasicType(valueType, types.Int64) && oneOf(verb, "%v", "%d") && n.intFormat.enabled: fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster strconv.FormatInt", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster strconv.FormatInt", + []analysis.SuggestedFix{ { Message: "Use strconv.FormatInt", TextEdits: []analysis.TextEdit{ @@ -390,25 +410,24 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }, }, }, - } + ) - case isBasicType(valueType, types.Uint8, types.Uint16, types.Uint32, types.Uint) && oneOf(verb, "%v", "%d", "%x") && n.intConv: + case isBasicType(valueType, types.Uint8, types.Uint16, types.Uint32, types.Uint) && oneOf(verb, "%v", "%d", "%x") && n.intFormat.intConv: base := []byte("), 10") if verb == "%x" { base = []byte("), 16") } fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster strconv.FormatUint", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with faster strconv.FormatUint", + []analysis.SuggestedFix{ { Message: "Use strconv.FormatUint", TextEdits: []analysis.TextEdit{ @@ -425,24 +444,23 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }, }, }, - } - case isBasicType(valueType, types.Uint64) && oneOf(verb, "%v", "%d", "%x"): + ) + case isBasicType(valueType, types.Uint64) && oneOf(verb, "%v", "%d", "%x") && n.intFormat.enabled: base := []byte(", 10") if verb == "%x" { base = []byte(", 16") } fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - _, ok := neededPackages[fname] - if !ok { - neededPackages[fname] = make(map[string]bool) - } - neededPackages[fname]["strconv"] = true - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with faster strconv.FormatUint", - SuggestedFixes: []analysis.SuggestedFix{ + if _, ok := neededPackages[fname]; !ok { + neededPackages[fname] = make(map[string]struct{}) + } + neededPackages[fname]["strconv"] = struct{}{} + d = newAnalysisDiagnostic( + "", // 
TODO: precise checker + call, + fn+" can be replaced with faster strconv.FormatUint", + []analysis.SuggestedFix{ { Message: "Use strconv.FormatUint", TextEdits: []analysis.TextEdit{ @@ -459,8 +477,8 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }, }, }, - } - case isBasicType(valueType, types.String) && fn == "fmt.Sprintf" && isConcatable(verb): + ) + case isBasicType(valueType, types.String) && fn == "fmt.Sprintf" && isConcatable(verb) && n.strFormat.enabled: var fix string if strings.HasSuffix(verb, "%s") { fix = strconv.Quote(verb[:len(verb)-2]) + "+" + formatNode(pass.Fset, value) @@ -473,11 +491,11 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ - d = &analysis.Diagnostic{ - Pos: call.Pos(), - End: call.End(), - Message: fn + " can be replaced with string concatenation", - SuggestedFixes: []analysis.SuggestedFix{ + d = newAnalysisDiagnostic( + "", // TODO: precise checker + call, + fn+" can be replaced with string concatenation", + []analysis.SuggestedFix{ { Message: "Use string concatenation", TextEdits: []analysis.TextEdit{{ @@ -487,7 +505,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { }}, }, }, - } + ) } if d != nil { @@ -522,8 +540,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { gd := node.(*ast.ImportSpec) if gd.Path.Value == strconv.Quote(pkg.Path()) { fname := pass.Fset.File(gd.Pos()).Name() - _, ok := neededPackages[fname] - if ok { + if _, ok := neededPackages[fname]; ok { delete(neededPackages[fname], pkg.Path()) } } @@ -553,11 +570,11 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { for _, k := range keys { fix = fix + "\n\t\"" + k + `"` } - pass.Report(analysis.Diagnostic{ - Pos: gd.Pos(), - End: gd.End(), - Message: "Fix imports", - SuggestedFixes: []analysis.SuggestedFix{ + pass.Report(*newAnalysisDiagnostic( + "", // TODO: precise checker + gd, + "Fix imports", + []analysis.SuggestedFix{ { Message: "Fix imports", TextEdits: []analysis.TextEdit{{ @@ -566,7 +583,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { NewText: []byte(fix), }}, }, - }}) + })) } }) } diff --git a/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go b/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go new file mode 100644 index 000000000..f1d8d090e --- /dev/null +++ b/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go @@ -0,0 +1,24 @@ +package analyzer + +import ( + "golang.org/x/tools/go/analysis" +) + +func newAnalysisDiagnostic( + checker string, + analysisRange analysis.Range, + message string, + suggestedFixes []analysis.SuggestedFix, +) *analysis.Diagnostic { + if checker != "" { + message = checker + ": " + message + } + + return &analysis.Diagnostic{ + Pos: analysisRange.Pos(), + End: analysisRange.End(), + SuggestedFixes: suggestedFixes, + Message: message, + Category: checker, // Possible hashtag available on the documentation + } +} diff --git a/vendor/github.com/ckaznocha/intrange/.golangci.yml b/vendor/github.com/ckaznocha/intrange/.golangci.yml index 2ad830d1b..b240f85ce 100644 --- a/vendor/github.com/ckaznocha/intrange/.golangci.yml +++ b/vendor/github.com/ckaznocha/intrange/.golangci.yml @@ -1,6 +1,9 @@ linters-settings: gci: - local-prefixes: github.com/ckaznocha/intrange + sections: + - standard + - default + - localmodule gocritic: enabled-tags: - diagnostic @@ -10,10 +13,7 @@ linters-settings: - style goimports: 
local-prefixes: github.com/ckaznocha/intrange - golint: - min-confidence: 0 govet: - check-shadowing: true enable: - asmdecl - assign @@ -24,6 +24,7 @@ linters-settings: - cgocall - composite - copylock + - copyloopvar - deepequalerrors - errorsas - fieldalignment @@ -57,18 +58,16 @@ linters: - dupl - errcheck - errorlint - - exportloopref - gci - gochecknoinits - goconst - gocritic - godot - godox - - goerr113 + - err113 - gofmt - gofumpt - goimports - - gomnd - goprintffuncname - gosec - gosimple @@ -94,6 +93,6 @@ linters: - wastedassign - whitespace - wsl -run: - skip-dirs: +issues: + exclude-dirs: - testdata/ diff --git a/vendor/github.com/ckaznocha/intrange/intrange.go b/vendor/github.com/ckaznocha/intrange/intrange.go index 44a15091e..229c847d5 100644 --- a/vendor/github.com/ckaznocha/intrange/intrange.go +++ b/vendor/github.com/ckaznocha/intrange/intrange.go @@ -79,6 +79,8 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } + initAssign := init.Tok == token.ASSIGN + if len(init.Lhs) != 1 || len(init.Rhs) != 1 { return } @@ -97,16 +99,13 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } - var nExpr ast.Expr + var ( + operand ast.Expr + hasEquivalentOperator bool + ) switch cond.Op { - case token.LSS: // ;i < n; - if isBenchmark(cond.Y) { - return - } - - nExpr = findNExpr(cond.Y) - + case token.LSS, token.LEQ: // ;i < n; || ;i <= n; x, ok := cond.X.(*ast.Ident) if !ok { return @@ -115,13 +114,10 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if x.Name != initIdent.Name { return } - case token.GTR: // ;n > i; - if isBenchmark(cond.X) { - return - } - - nExpr = findNExpr(cond.X) + hasEquivalentOperator = cond.Op == token.LEQ + operand = cond.Y + case token.GTR, token.GEQ: // ;n > i; || ;n >= i; y, ok := cond.Y.(*ast.Ident) if !ok { return @@ -130,6 +126,9 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if y.Name != initIdent.Name { return } + + hasEquivalentOperator = cond.Op == token.GEQ + operand = cond.X default: return } @@ -228,7 +227,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { bc := &bodyChecker{ initIdent: initIdent, - nExpr: nExpr, + nExpr: findNExpr(operand), } ast.Inspect(forStmt.Body, bc.check) @@ -237,9 +236,50 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } + if initAssign { + pass.Report(analysis.Diagnostic{ + Pos: forStmt.Pos(), + Message: msg + "\nBecause the key is not part of the loop's scope, take care to consider side effects.", + }) + + return + } + + operandIsNumberLit := isNumberLit(operand) + + if hasEquivalentOperator && !operandIsNumberLit { + return + } + + rangeX := operandToString( + pass, + initIdent, + operand, + hasEquivalentOperator && operandIsNumberLit, + ) + + var replacement string + if bc.accessed { + replacement = fmt.Sprintf("%s := range %s", initIdent.Name, rangeX) + } else { + replacement = fmt.Sprintf("range %s", rangeX) + } + pass.Report(analysis.Diagnostic{ Pos: forStmt.Pos(), Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("Replace loop with `%s`", replacement), + TextEdits: []analysis.TextEdit{ + { + Pos: forStmt.Init.Pos(), + End: forStmt.Post.End(), + NewText: []byte(replacement), + }, + }, + }, + }, }) } @@ -363,26 +403,45 @@ func findNExpr(expr ast.Expr) ast.Expr { } } -func isBenchmark(expr ast.Expr) bool { - selectorExpr, ok := expr.(*ast.SelectorExpr) - if !ok { - return false - } +func recursiveOperandToString( + expr ast.Expr, + incrementInt bool, +) string { + 
switch e := expr.(type) { + case *ast.CallExpr: + args := "" - if selectorExpr.Sel.Name != "N" { - return false - } + for i, v := range e.Args { + if i > 0 { + args += ", " + } - ident, ok := selectorExpr.X.(*ast.Ident) - if !ok { - return false - } + args += recursiveOperandToString(v, incrementInt && len(e.Args) == 1) + } - if ident.Name == "b" { - return true - } + return recursiveOperandToString(e.Fun, false) + "(" + args + ")" + case *ast.BasicLit: + if incrementInt && e.Kind == token.INT { + v, err := strconv.Atoi(e.Value) + if err == nil { + return strconv.Itoa(v + 1) + } + + return e.Value + } - return false + return e.Value + case *ast.Ident: + return e.Name + case *ast.SelectorExpr: + return recursiveOperandToString(e.X, false) + "." + recursiveOperandToString(e.Sel, false) + case *ast.IndexExpr: + return recursiveOperandToString(e.X, false) + "[" + recursiveOperandToString(e.Index, false) + "]" + case *ast.BinaryExpr: + return recursiveOperandToString(e.X, false) + " " + e.Op.String() + " " + recursiveOperandToString(e.Y, false) + default: + return "" + } } func identEqual(a, b ast.Expr) bool { @@ -428,6 +487,7 @@ type bodyChecker struct { initIdent *ast.Ident nExpr ast.Expr modified bool + accessed bool } func (b *bodyChecker) check(n ast.Node) bool { @@ -446,11 +506,55 @@ func (b *bodyChecker) check(n ast.Node) bool { return false } + case *ast.Ident: + if identEqual(stmt, b.initIdent) { + b.accessed = true + } } return true } +func isNumberLit(exp ast.Expr) bool { + switch lit := exp.(type) { + case *ast.BasicLit: + if lit.Kind == token.INT { + return true + } + + return false + case *ast.CallExpr: + switch fun := lit.Fun.(type) { + case *ast.Ident: + switch fun.Name { + case + "int", + "int8", + "int16", + "int32", + "int64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64": + default: + return false + } + default: + return false + } + + if len(lit.Args) != 1 { + return false + } + + return isNumberLit(lit.Args[0]) + default: + return false + } +} + func compareNumberLit(exp ast.Expr, val int) bool { switch lit := exp.(type) { case *ast.BasicLit: @@ -497,3 +601,27 @@ func compareNumberLit(exp ast.Expr, val int) bool { return false } } + +func operandToString( + pass *analysis.Pass, + i *ast.Ident, + operand ast.Expr, + increment bool, +) string { + s := recursiveOperandToString(operand, increment) + t := pass.TypesInfo.TypeOf(i) + + if t == types.Typ[types.Int] { + if len(s) > 5 && s[:4] == "int(" && s[len(s)-1] == ')' { + s = s[4 : len(s)-1] + } + + return s + } + + if len(s) > 2 && s[len(s)-1] == ')' { + return s + } + + return t.String() + "(" + s + ")" +} diff --git a/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/vendor/github.com/curioswitch/go-reassign/.golangci.yml index e3bf79ae7..fdf0bb2f2 100644 --- a/vendor/github.com/curioswitch/go-reassign/.golangci.yml +++ b/vendor/github.com/curioswitch/go-reassign/.golangci.yml @@ -5,14 +5,12 @@ linters: - bodyclose - decorder - durationcheck + - err113 - errchkjson - errname - errorlint - - execinquery - exhaustive - - exportloopref - gocritic - - goerr113 - gofmt - goimports - goprintffuncname @@ -20,7 +18,6 @@ linters: - importas - misspell - nolintlint - - nosnakecase - prealloc - predeclared - promlinter diff --git a/vendor/github.com/curioswitch/go-reassign/README.md b/vendor/github.com/curioswitch/go-reassign/README.md index ac9c131df..190756f92 100644 --- a/vendor/github.com/curioswitch/go-reassign/README.md +++ b/vendor/github.com/curioswitch/go-reassign/README.md @@ -47,7 +47,8 @@ Package variable 
reassignment is generally confusing, though, and we recommend a The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is recommended if it works with your code. -## Limitations +## Development -If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing -can be confusing, so it's recommended to rename the variable. +[mage](https://magefile.org/) is used for development. Run `go run mage.go -l` to see available targets. + +For example, to run checks before sending a PR, run `go run mage.go check`. diff --git a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go index e1b47d5b9..c2a29c529 100644 --- a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go +++ b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go @@ -48,23 +48,35 @@ func run(pass *analysis.Pass) (interface{}, error) { func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) { switch x := expr.(type) { case *ast.SelectorExpr: - if !checkRE.MatchString(x.Sel.Name) { - return - } - selectIdent, ok := x.X.(*ast.Ident) if !ok { return } + var pkgPath string if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { - if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg { + pkg, ok := selectObj.(*types.PkgName) + if !ok || pkg.Imported() == pass.Pkg { return } + pkgPath = pkg.Imported().Path() } - pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + matches := false + if checkRE.MatchString(x.Sel.Name) { + matches = true + } + if !matches { + // Expression may include a package name, so check that too. Support was added later so we check + // just name and qualified name separately for compatibility. + if checkRE.MatchString(pkgPath + "." + x.Sel.Name) { + matches = true + } + } + if matches { + pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + } case *ast.Ident: use, ok := pass.TypesInfo.Uses[x].(*types.Var) if !ok { diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 92b78048e..6f24dfff5 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,8 @@ # Change history of go-restful +## [v3.12.2] - 2025-02-21 + +- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt) ## [v3.12.1] - 2024-05-28 @@ -18,7 +21,7 @@ - fix by restoring custom JSON handler functions (Mike Beaumont #540) -## [v3.12.0] - 2023-08-19 +## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. 
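The go-reassign change above now matches the configured pattern against both the bare identifier and its package-qualified name. A minimal sketch of the kind of assignment it reports, assuming a pattern along the lines of `net/http\.DefaultTransport|io\.EOF` (the pattern strings and variables here are illustrative, not the linter's defaults):

```go
package main

import (
	"io"
	"net/http"
)

func main() {
	// Both statements reassign variables owned by another package; with the
	// qualified-name support above they can be matched either as
	// "DefaultTransport"/"EOF" or as "net/http.DefaultTransport"/"io.EOF".
	http.DefaultTransport = &http.Transport{}
	io.EOF = nil
}
```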
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 7234604e4..3fb40d198 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -3,7 +3,7 @@ go-restful package for building REST-style Web Services using Google Go [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index a9b3faaa8..7f04bd905 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma return params } -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { candidates := make([]*Route, 0, 8) for i, each := range routes { @@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") } // accept @@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) } - // if POST,PUT,PATCH without body - method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") - if (method == http.MethodPost || - method == http.MethodPut || - method == http.MethodPatch) && (length == "" || length == "0") { - return nil, NewError( - http.StatusUnsupportedMediaType, - fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) - } return nil, NewError( http.StatusNotAcceptable, - fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), - ) + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", "))) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 306c44be7..a2056e2ac 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool { } // Return whether this Route can consume content with a type specified by mimeTypes (can be empty). 
+// If the route does not specify Consumes then return true (*/*). +// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE. func (r Route) matchesContentType(mimeTypes string) bool { if len(r.Consumes) == 0 { diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md index af0a79507..d072b81c7 100644 --- a/vendor/github.com/fxamacker/cbor/v2/README.md +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -1,30 +1,31 @@ -# CBOR Codec in Go - - +

CBOR Codec Go logo

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. -`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). +`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). -See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer. ## fxamacker/cbor [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) -[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22) [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) +[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage) `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options. 
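As a quick illustration of that `encoding/json`-style API, a minimal sketch using the exported `Marshal`/`Unmarshal` functions (the `Animal` type is made up for the example):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	// Same call shape as json.Marshal / json.Unmarshal.
	b, err := cbor.Marshal(Animal{Name: "gopher", Legs: 4})
	if err != nil {
		panic(err)
	}

	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil {
		panic(err)
	}
	fmt.Printf("%x -> %+v\n", b, a)
}
```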
+ Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. -
Highlights

+

🔎  Highlights

__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -
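A hedged sketch of those configurable limits: `DecOptions` exposes fields such as `MaxNestedLevels`, `MaxArrayElements`, and `MaxMapPairs` that can be tightened for untrusted input (the values below are arbitrary, not recommended defaults):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Tighter limits than the library defaults, for untrusted input.
	dm, err := cbor.DecOptions{
		MaxNestedLevels:  16,
		MaxArrayElements: 1024,
		MaxMapPairs:      1024,
	}.DecMode()
	if err != nil {
		panic(err)
	}

	var v interface{}
	data := []byte{0xa1, 0x61, 0x61, 0x01} // {"a": 1}
	if err := dm.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:1]
}
```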

Example decoding with encoding/gob 💥 fatal error (out of memory)

- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
Benchmark details

- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -


- -
- -### Smaller Encodings with Struct Tags - -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. - -
Example encoding 3-level nested Go struct to 1 byte CBOR

- -https://go.dev/play/p/YxwvfPdFQG2 - -```Go -// Example encoding nested struct (with omitempty tag) -// - encoding/json: 18 byte JSON -// - fxamacker/cbor: 1 byte CBOR -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/fxamacker/cbor/v2" -) - -type GrandChild struct { - Quux int `json:",omitempty"` -} - -type Child struct { - Baz int `json:",omitempty"` - Qux GrandChild `json:",omitempty"` -} - -type Parent struct { - Foo Child `json:",omitempty"` - Bar int `json:",omitempty"` -} - -func cb() { - results, _ := cbor.Marshal(Parent{}) - fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) - - text, _ := cbor.Diagnose(results) // Diagnostic Notation - fmt.Println("DN: " + text) -} - -func js() { - results, _ := json.Marshal(Parent{}) - fmt.Println("hex(JSON): " + hex.EncodeToString(results)) - - text := string(results) // JSON - fmt.Println("JSON: " + text) -} - -func main() { - cb() - fmt.Println("-------------") - js() -} -``` - -Output (DN is Diagnostic Notation): -``` -hex(CBOR): a0 -DN: {} -------------- -hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d -JSON: {"Foo":{"Qux":{}}} -``` - -


- -
- -Example using different struct tags together: +Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data. + +> [!NOTE] +> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`: +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op | +> +> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference. +> +>
🔎  Benchmark details

+> +> Latest comparison for decoding CBOR data to Go `[]byte`: +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores) +> - go test -bench=. -benchmem -count=20 +> +> #### Prior comparisons +> +> | Codec | Speed (ns/op) | Memory | Allocs | +> | :---- | ------------: | -----: | -----: | +> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | +> +> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +> - go1.19.6, linux/amd64, i5-13600K (DDR4) +> - go test -bench=. -benchmem -count=20 +> +>

+ +In contrast, some codecs can crash or use excessive resources while decoding bad data. + +> [!WARNING] +> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). +> +>
🔎  gob fatal error (out of memory) 💥 decoding 181 bytes

+> +> ```Go +> // Example of encoding/gob having "fatal error: runtime: out of memory" +> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024). +> package main +> import ( +> "bytes" +> "encoding/gob" +> "encoding/hex" +> "fmt" +> ) +> +> // Example data is from https://github.com/golang/go/issues/24446 +> // (shortened to 181 bytes). +> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + +> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + +> "860001013001ff860001013001ffb80000001eff850401010e3030303030" + +> "30303030303030303001ff3000010c0104000016ffb70201010830303030" + +> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + +> "303030303030303030303030303030303030303030303030303030303030" + +> "30" +> +> type X struct { +> J *X +> K map[string]int +> } +> +> func main() { +> raw, _ := hex.DecodeString(data) +> decoder := gob.NewDecoder(bytes.NewReader(raw)) +> +> var x X +> decoder.Decode(&x) // fatal error: runtime: out of memory +> fmt.Println("Decoding finished.") +> } +> ``` +> +> +>

+ +### Smaller Encodings with Struct Tag Options + +Struct tags automatically reduce encoded size of structs and improve speed. + +We can write less code by using struct tag options: +- `toarray`: encode without field names (decode back to original struct) +- `keyasint`: encode field names as integers (decode back to original struct) +- `omitempty`: omit empty field when encoding +- `omitzero`: omit zero-value field when encoding + +As a special case, struct field tag "-" omits the field. + +NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field. ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") -API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. +> [!NOTE] +> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte! +> - `encoding/json`: 18 bytes of JSON +> - `fxamacker/cbor`: 1 byte of CBOR +> +>
🔎  Encoding 3-level nested Go struct with omitempty

+> +> https://go.dev/play/p/YxwvfPdFQG2 +> +> ```Go +> // Example encoding nested struct (with omitempty tag) +> // - encoding/json: 18 byte JSON +> // - fxamacker/cbor: 1 byte CBOR +> +> package main +> +> import ( +> "encoding/hex" +> "encoding/json" +> "fmt" +> +> "github.com/fxamacker/cbor/v2" +> ) +> +> type GrandChild struct { +> Quux int `json:",omitempty"` +> } +> +> type Child struct { +> Baz int `json:",omitempty"` +> Qux GrandChild `json:",omitempty"` +> } +> +> type Parent struct { +> Foo Child `json:",omitempty"` +> Bar int `json:",omitempty"` +> } +> +> func cb() { +> results, _ := cbor.Marshal(Parent{}) +> fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) +> +> text, _ := cbor.Diagnose(results) // Diagnostic Notation +> fmt.Println("DN: " + text) +> } +> +> func js() { +> results, _ := json.Marshal(Parent{}) +> fmt.Println("hex(JSON): " + hex.EncodeToString(results)) +> +> text := string(results) // JSON +> fmt.Println("JSON: " + text) +> } +> +> func main() { +> cb() +> fmt.Println("-------------") +> js() +> } +> ``` +> +> Output (DN is Diagnostic Notation): +> ``` +> hex(CBOR): a0 +> DN: {} +> ------------- +> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +> JSON: {"Foo":{"Qux":{}}} +> ``` +> +>

+ ## Quick Start __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. +> [!TIP] +> +> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta). +> +>
🔎  More about tinygo feature branch +> +> ### Tinygo +> +> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go). +> +> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo. +> +> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet. +> +> Changes in this feature branch only affect tinygo compiled software. Summary of changes: +> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33. +> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature. +> - encoding error message can be different when encoding function type. +> +> Related tinygo issues: +> - https://github.com/tinygo-org/tinygo/issues/4277 +> - https://github.com/tinygo-org/tinygo/issues/4458 +> +>
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. + +As a special case, struct field tag "-" omits the field. + +
🔎  Example encoding with struct field tag "-"

+ +https://go.dev/play/p/aWEIFxd7InX + +```Go +// https://github.com/fxamacker/cbor/issues/652 +package main + +import ( + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// The `cbor:"-"` tag omits the Type field when encoding to CBOR. +type Entity struct { + _ struct{} `cbor:",toarray"` + ID uint64 `json:"id"` + Type string `cbor:"-" json:"typeOf"` + Name string `json:"name"` +} + +func main() { + entity := Entity{ + ID: 1, + Type: "int64", + Name: "Identifier", + } + + c, _ := cbor.Marshal(entity) + diag, _ := cbor.Diagnose(c) + fmt.Printf("CBOR in hex: %x\n", c) + fmt.Printf("CBOR in edn: %s\n", diag) + + j, _ := json.Marshal(entity) + fmt.Printf("JSON: %s\n", string(j)) + + fmt.Printf("JSON encoding is %d bytes\n", len(j)) + fmt.Printf("CBOR encoding is %d bytes\n", len(c)) + + // Output: + // CBOR in hex: 82016a4964656e746966696572 + // CBOR in edn: [1, "Identifier"] + // JSON: {"id":1,"typeOf":"int64","name":"Identifier"} + // JSON encoding is 45 bytes + // CBOR encoding is 13 bytes +} +``` + +

-
Example encoding 3-level nested Go struct to 1 byte CBOR

+

🔎  Example encoding 3-level nested Go struct to 1 byte CBOR

https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}

-
Example using several struct tags

+

🔎  Example using struct tag options

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")
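A small sketch of the `toarray` and `keyasint` options shown in the figure above (the structs and key numbers are illustrative):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// toarray encodes the struct as a fixed-length CBOR array without field names.
type Record struct {
	_    struct{} `cbor:",toarray"`
	ID   uint64
	Name string
}

// keyasint encodes struct fields as map entries with integer keys.
type Claims struct {
	Issuer  string `cbor:"1,keyasint,omitempty"`
	Subject string `cbor:"2,keyasint,omitempty"`
}

func main() {
	r, _ := cbor.Marshal(Record{ID: 7, Name: "x"})
	c, _ := cbor.Marshal(Claims{Issuer: "a"})
	fmt.Printf("toarray:  %x\nkeyasint: %x\n", r, c)
}
```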

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
Example using TagSet and TagOptions

+

🔎  Example using TagSet and TagOptions

```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ```

+👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces. + +Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc. + +The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2). + +
🔎  Example using Embedded JSON Tag for CBOR (tag 262) + +```go +// https://github.com/fxamacker/cbor/issues/657 + +package cbor_test + +// NOTE: RFC 8949 does not mention tag number 262. IANA assigned +// CBOR tag number 262 as "Embedded JSON Object" specified by the +// document Embedded JSON Tag for CBOR: +// +// "Tag 262 can be applied to a byte string (major type 2) to indicate +// that the byte string is a JSON Object. The length of the byte string +// indicates the content." +// +// For more info, see Embedded JSON Tag for CBOR at: +// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +// cborTagNumForEmbeddedJSON is the CBOR tag number 262. +const cborTagNumForEmbeddedJSON = 262 + +// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item +// with tag number 262 and the tag content is a JSON object "embedded" as a +// CBOR byte string (major type 2). +type EmbeddedJSON struct { + any +} + +func NewEmbeddedJSON(val any) EmbeddedJSON { + return EmbeddedJSON{val} +} + +// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the +// tag number 262 and the tag content is a JSON object that is +// "embedded" as a CBOR byte string. +func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) { + // Encode v to JSON object. + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // Create cbor.Tag representing a tagged CBOR data item. + tag := cbor.Tag{ + Number: cborTagNumForEmbeddedJSON, + Content: data, + } + + // Marshal to a tagged CBOR data item. + return cbor.Marshal(tag) +} + +// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON. +// The byte slice provided to this function must contain a single +// tagged CBOR data item with the tag number 262 and tag content +// must be a JSON object "embedded" as a CBOR byte string. +func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error { + // Unmarshal tagged CBOR data item. + var tag cbor.Tag + if err := cbor.Unmarshal(b, &tag); err != nil { + return err + } + + // Check tag number. + if tag.Number != cborTagNumForEmbeddedJSON { + return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON) + } + + // Check tag content. + jsonData, isByteString := tag.Content.([]byte) + if !isByteString { + return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content) + } + + // Unmarshal JSON object. + return json.Unmarshal(jsonData, v) +} + +// MarshalJSON encodes EmbeddedJSON to a JSON object. +func (v EmbeddedJSON) MarshalJSON() ([]byte, error) { + return json.Marshal(v.any) +} + +// UnmarshalJSON decodes a JSON object. +func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error { + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + return dec.Decode(&v.any) +} + +func Example_embeddedJSONTagForCBOR() { + value := NewEmbeddedJSON(map[string]any{ + "name": "gopher", + "id": json.Number("42"), + }) + + data, err := cbor.Marshal(value) + if err != nil { + panic(err) + } + + fmt.Printf("cbor: %x\n", data) + + var v EmbeddedJSON + err = cbor.Unmarshal(data, &v) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", v.any) + for k, v := range v.any.(map[string]any) { + fmt.Printf(" %s: %v (%T)\n", k, v, v) + } +} +``` + +
+ + ### Functions and Interfaces -
Functions and interfaces at a glance

+

🔎  Functions and interfaces at a glance

Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed. Other useful functions: - `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. - `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes. -- `Wellformed` returns true if the the CBOR data item is well-formed. +- `Wellformed` returns true if the CBOR data item is well-formed. Interfaces identical or comparable to Go `encoding` packages include: `Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. @@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs. +- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string. +- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function. +- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR. + +v2.9.0 passed fuzz tests and is production quality. + +The minimum version of Go required to build: +- v2.8.0 and newer releases require go 1.20+. +- v2.7.1 and older releases require go 1.17+. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. -v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). +[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. 
It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). __IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. @@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. )`) + reSpaces = regexp.MustCompile(`(?s)>\s+<`) + reNewlines = regexp.MustCompile(`\r*\n`) // NL is the newline string used in XML output. NL = "\n" ) @@ -33,20 +36,19 @@ func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string if len(nestedTagsInComments) > 0 { nestedTagsInComment = nestedTagsInComments[0] } - reXmlComments := regexp.MustCompile(`(?s)()`) - src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + src := reSpaces.ReplaceAllString(xmls, "><") if nestedTagsInComment { - src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) - p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") + src = reXMLComments.ReplaceAllStringFunc(src, func(m string) string { + parts := reXMLComments.FindStringSubmatch(m) + p2 := reNewlines.ReplaceAllString(parts[2], " ") return parts[1] + html.EscapeString(p2) + parts[3] }) } rf := replaceTag(prefix, indent) r := prefix + reg.ReplaceAllStringFunc(src, rf) if nestedTagsInComment { - r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) + r = reXMLComments.ReplaceAllStringFunc(r, func(m string) string { + parts := reXMLComments.FindStringSubmatch(m) return parts[1] + html.UnescapeString(parts[2]) + parts[3] }) } diff --git a/vendor/github.com/golangci/dupl/.travis.yml b/vendor/github.com/golangci/dupl/.travis.yml deleted file mode 100644 index 33de24c0f..000000000 --- a/vendor/github.com/golangci/dupl/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - - 1.8 - - 1.9 diff --git a/vendor/github.com/golangci/dupl/README.md b/vendor/github.com/golangci/dupl/README.md deleted file mode 100644 index f34901d7a..000000000 --- a/vendor/github.com/golangci/dupl/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# dupl [![Build Status](https://travis-ci.org/mibk/dupl.png)](https://travis-ci.org/mibk/dupl) - -**dupl** is a tool written in Go for finding code clones. So far it can find clones only -in the Go source files. The method uses suffix tree for serialized ASTs. It ignores values -of AST nodes. It just operates with their types (e.g. `if a == 13 {}` and `if x == 100 {}` are -considered the same provided it exceeds the minimal token sequence size). - -Due to the used method dupl can report so called "false positives" on the output. These are -the ones we do not consider clones (whether they are too small, or the values of the matched -tokens are completely different). - -## Installation - -```bash -go get -u github.com/golangci/dupl -``` - -## Usage - -``` -Usage of dupl: - dupl [flags] [paths] - -Paths: - If the given path is a file, dupl will use it regardless of - the file extension. 
If it is a directory it will recursively - search for *.go files in that directory. - - If no path is given dupl will recursively search for *.go - files in the current directory. - -Flags: - -files - read file names from stdin one at each line - -html - output the results as HTML, including duplicate code fragments - -plumbing - plumbing (easy-to-parse) output for consumption by scripts or tools - -t, -threshold size - minimum token sequence size as a clone (default 15) - -vendor - check files in vendor directory - -v, -verbose - explain what is being done - -Examples: - dupl -t 100 - Search clones in the current directory of size at least - 100 tokens. - dupl $(find app/ -name '*_test.go') - Search for clones in tests in the app directory. - find app/ -name '*_test.go' |dupl -files - The same as above. -``` - -## Example - -The reduced output of this command with the following parameters for the [Docker](https://www.docker.com) source code -looks like [this](http://htmlpreview.github.io/?https://github.com/golangci/dupl/blob/master/_output_example/docker.html). - -```bash -$ dupl -t 200 -html >docker.html -``` diff --git a/vendor/github.com/golangci/dupl/main.go b/vendor/github.com/golangci/dupl/lib/lib.go similarity index 51% rename from vendor/github.com/golangci/dupl/main.go rename to vendor/github.com/golangci/dupl/lib/lib.go index 3030a97ae..3000a8f38 100644 --- a/vendor/github.com/golangci/dupl/main.go +++ b/vendor/github.com/golangci/dupl/lib/lib.go @@ -1,11 +1,8 @@ -package dupl +// Package lib Golangci-lint: altered version of main.go +package lib import ( - "flag" - "fmt" - "io/ioutil" "os" - "path/filepath" "sort" "github.com/golangci/dupl/job" @@ -13,27 +10,6 @@ import ( "github.com/golangci/dupl/syntax" ) -const defaultThreshold = 15 - -var ( - paths = []string{"."} - vendor = flag.Bool("dupl.vendor", false, "") - verbose = flag.Bool("dupl.verbose", false, "") - files = flag.Bool("dupl.files", false, "") - - html = flag.Bool("dupl.html", false, "") - plumbing = flag.Bool("dupl.plumbing", false, "") -) - -const ( - vendorDirPrefix = "vendor" + string(filepath.Separator) - vendorDirInPath = string(filepath.Separator) + vendorDirPrefix -) - -func init() { - flag.BoolVar(verbose, "dupl.v", false, "alias for -verbose") -} - func Run(files []string, threshold int) ([]printer.Issue, error) { fchan := make(chan string, 1024) go func() { @@ -75,7 +51,7 @@ func makeIssues(duplChan <-chan syntax.Match) ([]printer.Issue, error) { } sort.Strings(keys) - p := printer.NewPlumbing(ioutil.ReadFile) + p := printer.NewIssuer(os.ReadFile) var issues []printer.Issue for _, k := range keys { @@ -110,39 +86,3 @@ func unique(group [][]*syntax.Node) [][]*syntax.Node { } return newGroup } - -func usage() { - fmt.Fprintln(os.Stderr, `Usage: dupl [flags] [paths] - -Paths: - If the given path is a file, dupl will use it regardless of - the file extension. If it is a directory, it will recursively - search for *.go files in that directory. - - If no path is given, dupl will recursively search for *.go - files in the current directory. 
- -Flags: - -files - read file names from stdin one at each line - -html - output the results as HTML, including duplicate code fragments - -plumbing - plumbing (easy-to-parse) output for consumption by scripts or tools - -t, -threshold size - minimum token sequence size as a clone (default 15) - -vendor - check files in vendor directory - -v, -verbose - explain what is being done - -Examples: - dupl -t 100 - Search clones in the current directory of size at least - 100 tokens. - dupl $(find app/ -name '*_test.go') - Search for clones in tests in the app directory. - find app/ -name '*_test.go' |dupl -files - The same as above.`) - os.Exit(2) -} diff --git a/vendor/github.com/golangci/dupl/printer/html.go b/vendor/github.com/golangci/dupl/printer/html.go index 5ad9e25c7..ac1474141 100644 --- a/vendor/github.com/golangci/dupl/printer/html.go +++ b/vendor/github.com/golangci/dupl/printer/html.go @@ -3,6 +3,7 @@ package printer import ( "bytes" "fmt" + "html" "io" "regexp" "sort" @@ -10,17 +11,17 @@ import ( "github.com/golangci/dupl/syntax" ) -type html struct { +type htmlprinter struct { iota int w io.Writer ReadFile } func NewHTML(w io.Writer, fread ReadFile) Printer { - return &html{w: w, ReadFile: fread} + return &htmlprinter{w: w, ReadFile: fread} } -func (p *html) PrintHeader() error { +func (p *htmlprinter) PrintHeader() error { _, err := fmt.Fprint(p.w, ` Duplicates @@ -35,7 +36,7 @@ func (p *html) PrintHeader() error { return err } -func (p *html) PrintClones(dups [][]*syntax.Node) error { +func (p *htmlprinter) PrintClones(dups [][]*syntax.Node) error { p.iota++ fmt.Fprintf(p.w, "

#%d found %d clones

\n", p.iota, len(dups)) @@ -63,12 +64,13 @@ func (p *html) PrintClones(dups [][]*syntax.Node) error { sort.Sort(byNameAndLine(clones)) for _, cl := range clones { - fmt.Fprintf(p.w, "

%s:%d

\n
%s
\n", cl.filename, cl.lineStart, cl.fragment) + fmt.Fprintf(p.w, "

%s:%d

\n
%s
\n", cl.filename, cl.lineStart, + html.EscapeString(string(cl.fragment))) } return nil } -func (*html) PrintFooter() error { return nil } +func (*htmlprinter) PrintFooter() error { return nil } func findLineBeg(file []byte, index int) int { for i := index; i >= 0; i-- { diff --git a/vendor/github.com/golangci/dupl/printer/issuer.go b/vendor/github.com/golangci/dupl/printer/issuer.go new file mode 100644 index 000000000..9b79f5705 --- /dev/null +++ b/vendor/github.com/golangci/dupl/printer/issuer.go @@ -0,0 +1,56 @@ +package printer + +// Golangci-lint: altered version of plumbing.go + +import ( + "sort" + + "github.com/golangci/dupl/syntax" +) + +type Clone clone + +func (c Clone) Filename() string { + return c.filename +} + +func (c Clone) LineStart() int { + return c.lineStart +} + +func (c Clone) LineEnd() int { + return c.lineEnd +} + +type Issue struct { + From, To Clone +} + +type Issuer struct { + ReadFile +} + +func NewIssuer(fread ReadFile) *Issuer { + return &Issuer{fread} +} + +func (p *Issuer) MakeIssues(dups [][]*syntax.Node) ([]Issue, error) { + clones, err := prepareClonesInfo(p.ReadFile, dups) + if err != nil { + return nil, err + } + + sort.Sort(byNameAndLine(clones)) + + var issues []Issue + + for i, cl := range clones { + nextCl := clones[(i+1)%len(clones)] + issues = append(issues, Issue{ + From: Clone(cl), + To: Clone(nextCl), + }) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/dupl/printer/plumbing.go b/vendor/github.com/golangci/dupl/printer/plumbing.go index cf39d01b7..b0577ddd5 100644 --- a/vendor/github.com/golangci/dupl/printer/plumbing.go +++ b/vendor/github.com/golangci/dupl/printer/plumbing.go @@ -1,50 +1,36 @@ package printer import ( + "fmt" + "io" "sort" "github.com/golangci/dupl/syntax" ) -type Clone clone - -func (c Clone) Filename() string { - return c.filename -} - -func (c Clone) LineStart() int { - return c.lineStart -} - -func (c Clone) LineEnd() int { - return c.lineEnd -} - -type Issue struct { - From, To Clone -} - -type Plumbing struct { +type plumbing struct { + w io.Writer ReadFile } -func NewPlumbing(fread ReadFile) *Plumbing { - return &Plumbing{fread} +func NewPlumbing(w io.Writer, fread ReadFile) Printer { + return &plumbing{w, fread} } -func (p *Plumbing) MakeIssues(dups [][]*syntax.Node) ([]Issue, error) { +func (p *plumbing) PrintHeader() error { return nil } + +func (p *plumbing) PrintClones(dups [][]*syntax.Node) error { clones, err := prepareClonesInfo(p.ReadFile, dups) if err != nil { - return nil, err + return err } sort.Sort(byNameAndLine(clones)) - var issues []Issue for i, cl := range clones { nextCl := clones[(i+1)%len(clones)] - issues = append(issues, Issue{ - From: Clone(cl), - To: Clone(nextCl), - }) + fmt.Fprintf(p.w, "%s:%d-%d: duplicate of %s:%d-%d\n", cl.filename, cl.lineStart, cl.lineEnd, + nextCl.filename, nextCl.lineStart, nextCl.lineEnd) } - return issues, nil + return nil } + +func (p *plumbing) PrintFooter() error { return nil } diff --git a/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go b/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go index 738015025..871469e8d 100644 --- a/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go +++ b/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go @@ -41,7 +41,7 @@ func New() *STree { // Update refreshes the suffix tree to by new data. func (t *STree) Update(data ...Token) { t.data = append(t.data, data...) 
- for _ = range data { + for range data { t.update() t.s, t.start = t.canonize(t.s, t.start, t.end) t.end++ diff --git a/vendor/github.com/golangci/dupl/syntax/syntax.go b/vendor/github.com/golangci/dupl/syntax/syntax.go index e2c750afd..9b11d3119 100644 --- a/vendor/github.com/golangci/dupl/syntax/syntax.go +++ b/vendor/github.com/golangci/dupl/syntax/syntax.go @@ -6,6 +6,19 @@ import ( "github.com/golangci/dupl/suffixtree" ) +// To avoid "goroutine stack exceeds" with gigantic slices (Composite Literals). +// 10_000 => 0.89s +// 20_000 => 1.53s +// 30_000 => 2.57s +// 40_000 => 3.89s +// 50_000 => 5.58s +// 60_000 => 7.95s +// 70_000 => 10.15s +// 80_000 => 13.11s +// 90_000 => 16.62s +// 100_000 => 21.42s +const maxChildrenSerial = 10_000 + type Node struct { Type int Filename string @@ -40,7 +53,12 @@ func Serialize(n *Node) []*Node { func serial(n *Node, stream *[]*Node) int { *stream = append(*stream, n) var count int - for _, child := range n.Children { + for i, child := range n.Children { + // To avoid "goroutine stack exceeds" with gigantic slices (Composite Literals). + if i > maxChildrenSerial { + break + } + count += serial(child, stream) } n.Owns = count diff --git a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go index 909d37657..a4f252e86 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go +++ b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "github.com/golangci/gofmt/gofmt/internal/diff" + "github.com/rogpeppe/go-internal/diff" "golang.org/x/sync/semaphore" ) diff --git a/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/vendor/github.com/golangci/gofmt/gofmt/golangci.go index a69611e1d..a7f3ef6e7 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -11,9 +11,14 @@ import ( "path/filepath" "sync" - "github.com/golangci/gofmt/gofmt/internal/diff" + "github.com/rogpeppe/go-internal/diff" ) +type Options struct { + NeedSimplify bool + RewriteRules []RewriteRule +} + var parserModeMu sync.RWMutex type RewriteRule struct { @@ -22,13 +27,13 @@ type RewriteRule struct { } // Run runs gofmt. -// Deprecated: use RunRewrite instead. +// Deprecated: use [Source] instead. func Run(filename string, needSimplify bool) ([]byte, error) { return RunRewrite(filename, needSimplify, nil) } // RunRewrite runs gofmt. -// empty string `rewrite` will be ignored. +// Deprecated: use [Source] instead. func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) ([]byte, error) { src, err := os.ReadFile(filename) if err != nil { @@ -73,6 +78,34 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) return diff.Diff(oldName, src, newName, res), nil } +// Source formats the code like gofmt. +// Empty string `rewrite` will be ignored. 
+func Source(filename string, src []byte, opts Options) ([]byte, error) { + fset := token.NewFileSet() + + parserModeMu.Lock() + initParserMode() + parserModeMu.Unlock() + + file, sourceAdj, indentAdj, err := parse(fset, filename, src, false) + if err != nil { + return nil, err + } + + file, err = rewriteFileContent(fset, file, opts.RewriteRules) + if err != nil { + return nil, err + } + + ast.SortImports(fset, file) + + if opts.NeedSimplify { + simplify(file) + } + + return format(fset, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) +} + func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { for _, rewriteRule := range rewriteRules { pattern, err := parseExpression(rewriteRule.Pattern, "pattern") diff --git a/vendor/github.com/golangci/gofmt/gofmt/readme.md b/vendor/github.com/golangci/gofmt/gofmt/readme.md index be08179e6..907973116 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/readme.md +++ b/vendor/github.com/golangci/gofmt/gofmt/readme.md @@ -3,8 +3,7 @@ - https://github.com/golang/go/blob/master/src/cmd/gofmt/ - https://github.com/golang/go/blob/master/src/internal/testenv - https://github.com/golang/go/blob/master/src/internal/platform -- https://github.com/golang/go/blob/master/src/internal/txtar -- https://github.com/golang/go/blob/master/src/internal/diff +- https://github.com/golang/go/blob/master/src/internal/diff -> replaced by `github.com/rogpeppe/go-internal/diff` - https://github.com/golang/go/blob/master/src/internal/cfg ## Updates diff --git a/vendor/github.com/golangci/gofmt/goimports/goimports.go b/vendor/github.com/golangci/gofmt/goimports/goimports.go deleted file mode 100644 index 556f2bd7e..000000000 --- a/vendor/github.com/golangci/gofmt/goimports/goimports.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package goimports - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" -) - -// Extracted from golang.org/x/tools@v0.24.0/cmd/goimports/goimports.go - -func writeTempFile(dir, prefix string, data []byte) (string, error) { - file, err := os.CreateTemp(dir, prefix) - if err != nil { - return "", err - } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 - } - if err != nil { - os.Remove(file.Name()) - return "", err - } - return file.Name(), nil -} - -func diff(b1, b2 []byte, filename string) (data []byte, err error) { - f1, err := writeTempFile("", "gofmt", b1) - if err != nil { - return - } - defer os.Remove(f1) - - f2, err := writeTempFile("", "gofmt", b2) - if err != nil { - return - } - defer os.Remove(f2) - - cmd := "diff" - if runtime.GOOS == "plan9" { - cmd = "/bin/ape/diff" - } - - data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - return replaceTempFilename(data, filename) - } - return -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... 
-func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] - } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] - } - // Always print filepath with slash separator. - f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil -} diff --git a/vendor/github.com/golangci/gofmt/goimports/golangci.go b/vendor/github.com/golangci/gofmt/goimports/golangci.go deleted file mode 100644 index 6ff286ae0..000000000 --- a/vendor/github.com/golangci/gofmt/goimports/golangci.go +++ /dev/null @@ -1,35 +0,0 @@ -package goimports - -import ( - "bytes" - "fmt" - "os" - - "golang.org/x/tools/imports" -) - -// Run runs goimports. -// The local prefixes (comma separated) must be defined through the global variable imports.LocalPrefix. -func Run(filename string) ([]byte, error) { - src, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - - res, err := imports.Process(filename, src, nil) - if err != nil { - return nil, err - } - - if bytes.Equal(src, res) { - return nil, nil - } - - // formatting has changed - data, err := diff(src, res, filename) - if err != nil { - return nil, fmt.Errorf("error computing diff: %s", err) - } - - return data, nil -} diff --git a/vendor/github.com/golangci/gofmt/goimports/readme.md b/vendor/github.com/golangci/gofmt/goimports/readme.md deleted file mode 100644 index 23eecf82f..000000000 --- a/vendor/github.com/golangci/gofmt/goimports/readme.md +++ /dev/null @@ -1,10 +0,0 @@ -# Hard Fork of goimports - -- https://github.com/golang/tools/tree/master/cmd/goimports - -## Updates - -- 2024-08-17: Sync with golang.org/x/tools v0.24.0 -- 2024-02-28: Sync with golang.org/x/tools v0.18.0 -- 2023-10-04: Sync with golang.org/x/tools v0.13.0 -- 2022-08-31: Sync with golang.org/x/tools v0.1.12 diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index 413e071d6..bf235bf17 100644 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -1,6 +1,7 @@ package main import ( + "cmp" "fmt" "os" "runtime/debug" @@ -63,17 +64,9 @@ func createBuildInfo() commands.BuildInfo { } } - if revision == "" { - revision = "unknown" - } - - if modified == "" { - modified = "?" 
- } - - if info.Date == "" { - info.Date = "(unknown)" - } + revision = cmp.Or(revision, "unknown") + modified = cmp.Or(modified, "?") + info.Date = cmp.Or(info.Date, "(unknown)") info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum) diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go index c249084e1..41eb5c82b 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -6,12 +6,12 @@ import ( "encoding/hex" "errors" "fmt" + "maps" "runtime" "slices" "strings" "sync" - "golang.org/x/exp/maps" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/internal/go/cache" @@ -178,9 +178,7 @@ func (c *Cache) computePkgHash(pkg *packages.Package) (hashResults, error) { curSum := key.Sum() hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:]) - imps := maps.Values(pkg.Imports) - - slices.SortFunc(imps, func(a, b *packages.Package) int { + imps := slices.SortedFunc(maps.Values(pkg.Imports), func(a, b *packages.Package) int { return strings.Compare(a.PkgPath, b.PkgPath) }) diff --git a/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE similarity index 92% rename from vendor/github.com/sagikazarmark/slog-shim/LICENSE rename to vendor/github.com/golangci/golangci-lint/internal/x/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/github.com/sagikazarmark/slog-shim/LICENSE +++ b/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md new file mode 100644 index 000000000..4d221d4ca --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md @@ -0,0 +1,8 @@ +# analysisflags + +Extracted from `/go/analysis/internal/analysisflags` (related to `checker`). +This is just a copy of the code without any changes. + +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go new file mode 100644 index 000000000..26a917a99 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package analysisflags + +import ( + "fmt" + "net/url" + + "golang.org/x/tools/go/analysis" +) + +// ResolveURL resolves the URL field for a Diagnostic from an Analyzer +// and returns the URL. See Diagnostic.URL for details. +func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) { + if d.URL == "" && d.Category == "" && a.URL == "" { + return "", nil // do nothing + } + raw := d.URL + if d.URL == "" && d.Category != "" { + raw = "#" + d.Category + } + u, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err) + } + base, err := url.Parse(a.URL) + if err != nil { + return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err) + } + return base.ResolveReference(u).String(), nil +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go new file mode 100644 index 000000000..bb12600da --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal provides gopls' internal analyses with a +// number of helper functions that operate on typed syntax trees. +package analysisinternal + +import ( + "fmt" + "os" + + "golang.org/x/tools/go/analysis" +) + +// MakeReadFile returns a simple implementation of the Pass.ReadFile function. +func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { + return func(filename string) ([]byte, error) { + if err := CheckReadable(pass, filename); err != nil { + return nil, err + } + return os.ReadFile(filename) + } +} + +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { + if slicesContains(pass.OtherFiles, filename) || + slicesContains(pass.IgnoredFiles, filename) { + return nil + } + for _, f := range pass.Files { + if pass.Fset.File(f.FileStart).Name() == filename { + return nil + } + } + return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) +} + +// TODO(adonovan): use go1.21 slices.Contains. +func slicesContains[S ~[]E, E comparable](slice S, x E) bool { + for _, elem := range slice { + if elem == x { + return true + } + } + return false +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md new file mode 100644 index 000000000..f301cdbeb --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md @@ -0,0 +1,8 @@ +# analysisinternal + +Extracted from `/internal/analysisinternal/` (related to `checker`). +This is just a copy of the code without any changes. + +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go new file mode 100644 index 000000000..a13547b7a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go @@ -0,0 +1,176 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff computes differences between text files or strings. +package diff + +import ( + "fmt" + "sort" + "strings" +) + +// An Edit describes the replacement of a portion of a text file. +type Edit struct { + Start, End int // byte offsets of the region to replace + New string // the replacement +} + +func (e Edit) String() string { + return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New) +} + +// Apply applies a sequence of edits to the src buffer and returns the +// result. Edits are applied in order of start offset; edits with the +// same start offset are applied in they order they were provided. +// +// Apply returns an error if any edit is out of bounds, +// or if any pair of edits is overlapping. +func Apply(src string, edits []Edit) (string, error) { + edits, size, err := validate(src, edits) + if err != nil { + return "", err + } + + // Apply edits. + out := make([]byte, 0, size) + lastEnd := 0 + for _, edit := range edits { + if lastEnd < edit.Start { + out = append(out, src[lastEnd:edit.Start]...) + } + out = append(out, edit.New...) + lastEnd = edit.End + } + out = append(out, src[lastEnd:]...) + + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// ApplyBytes is like Apply, but it accepts a byte slice. +// The result is always a new array. +func ApplyBytes(src []byte, edits []Edit) ([]byte, error) { + res, err := Apply(string(src), edits) + return []byte(res), err +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. + size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See ApplyEdits for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all deletions begin and end at the start of a line, + // and all insertions end with a newline? + // (This is merely a fast path.) 
+	for _, edit := range edits {
+		if edit.Start >= len(src) || // insertion at EOF
+			edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
+			edit.End > 0 && src[edit.End-1] != '\n' || // not at line start
+			edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert
+			goto expand // slow path
+		}
+	}
+	return edits, nil // aligned
+
+expand:
+	if len(edits) == 0 {
+		return edits, nil // no edits (unreachable due to fast path)
+	}
+	expanded := make([]Edit, 0, len(edits)) // a guess
+	prev := edits[0]
+	// TODO(adonovan): opt: start from the first misaligned edit.
+	// TODO(adonovan): opt: avoid quadratic cost of string += string.
+	for _, edit := range edits[1:] {
+		between := src[prev.End:edit.Start]
+		if !strings.Contains(between, "\n") {
+			// overlapping lines: combine with previous edit.
+			prev.New += between + edit.New
+			prev.End = edit.End
+		} else {
+			// non-overlapping lines: flush previous edit.
+			expanded = append(expanded, expandEdit(prev, src))
+			prev = edit
+		}
+	}
+	return append(expanded, expandEdit(prev, src)), nil // flush final edit
+}
+
+// expandEdit returns edit expanded to complete whole lines.
+func expandEdit(edit Edit, src string) Edit {
+	// Expand start left to start of line.
+	// (delta is the zero-based column number of start.)
+	start := edit.Start
+	if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+		edit.Start -= delta
+		edit.New = src[start-delta:start] + edit.New
+	}
+
+	// Expand end right to end of line.
+	end := edit.End
+	if end > 0 && src[end-1] != '\n' ||
+		edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
+		if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+			edit.End = len(src) // extend to EOF
+		} else {
+			edit.End = end + nl + 1 // extend beyond \n
+		}
+	}
+	edit.New += src[end:edit.End]
+
+	return edit
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go
new file mode 100644
index 000000000..c3e82dd26
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+	X, Y int
+	Len  int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+	sort.Slice(l, func(i, j int) bool {
+		if l[i].X != l[j].X {
+			return l[i].X < l[j].X
+		}
+		return l[i].Len > l[j].Len
+	})
+	return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+	for i := 1; i < len(l); i++ {
+		if l[i-1].X+l[i-1].Len > l[i].X {
+			return false
+		}
+		if l[i-1].Y+l[i-1].Len > l[i].Y {
+			return false
+		}
+	}
+	return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+	// from the set of diagonals in l, find a maximal non-conflicting set
+	// this problem may be NP-complete, but we use a greedy heuristic,
+	// which is quadratic, but with a better data structure, could be D log D. 
+ // indepedent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs + // which has to have monotone x and y + if len(l) == 0 { + return nil + } + sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len }) + tmp := make(lcs, 0, len(l)) + tmp = append(tmp, l[0]) + for i := 1; i < len(l); i++ { + var dir direction + nxt := l[i] + for _, in := range tmp { + if dir, nxt = overlap(in, nxt); dir == empty || dir == bad { + break + } + } + if nxt.Len > 0 && dir != bad { + tmp = append(tmp, nxt) + } + } + tmp.sort() + if false && !tmp.valid() { // debug checking + log.Fatalf("here %d", len(tmp)) + } + return tmp +} + +type direction int + +const ( + empty direction = iota // diag is empty (so not in lcs) + leftdown // proposed acceptably to the left and below + rightup // proposed diag is acceptably to the right and above + bad // proposed diag is inconsistent with the lcs so far +) + +// overlap trims the proposed diag prop so it doesn't overlap with +// the existing diag that has already been added to the lcs. +func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of peop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prepend is only called to extend diagonals +// the backward direction. +func (lcs lcs) prepend(x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// append appends a diagonal, or extends the existing one. +// by adding the edge (x,y)-(x+1.y+1). append is only called +// to extend diagonals in the forward direction. +func (lcs lcs) append(x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. 
+ if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go new file mode 100644 index 000000000..9029dd20b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a +horizontal edge from v[i][j] to v[i+1][j] whenever both are in +the graph, and a vertical edge from v[i][j] to f[i][j+1] similarly. +When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1]. + +A path between in the graph between (0,0) and (N,M) determines a sequence +of edits converting A into B: each horizontal edge corresponds to removing +an element of A, and each vertical edge corresponds to inserting an +element of B. + +A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph +is of length D if it has D non-diagonal edges. The algorithms generate +forward paths (in which at least one of x,y increases at each edge), +or backward paths (in which at least one of x,y decreases at each edge), +or a combination. (Note that the orientation is the traditional mathematical one, +with the origin in the lower-left corner.) + +Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.) + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + + +The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at +the end of a maximal path of length D. (Because x-y=k it suffices to remember +only the x coordinate of the vertex.) + +The forward algorithm: Find the longest diagonal starting at (0,0) and +label its end with D=0,k=0. From that vertex take a vertical step and +then follow the longest diagonal (up and to the right), and label that vertex +with D=1,k=-1. 
From the D=0,k=0 point take a horizontal step and the follow +the longest diagonal (up and to the right) and label that vertex +D=1,k=1. In the same way, having labelled all the D vertices, +from a vertex labelled D,k find two vertices +tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same +diagonal, in which case take the one with the larger x. + +Eventually the path gets to (N,M), and the diagonals on it are the LCS. + +Here is the edit graph with the ends of D-paths labelled. (So, for instance, +0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first +step is to go up the longest diagonal from (0,0).) +A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. 
+A:"aabbaa", B:"aacaba" + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙ + b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | | + ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙ + c | | | | | | | + ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a a b b a a + +The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion +is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same +diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward +2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path. +Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the +computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed +from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path. + +If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a +backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two +computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS +is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two +to form a best-effort LCS. In the worst case the forward partial LCS may have to +be recomputed. +*/ + +/* Eugene Myers paper is titled +"An O(ND) Difference Algorithm and Its Variations" +and can be found at +http://www.xmailserver.org/diff2.pdf + +(There is a generic implementation of the algorithm the repository with git hash +b9ad7e4ade3a686d608e44475390ad428e60e7fc) +*/ diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh new file mode 100644 index 000000000..b25ba4aac --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Creates a zip file containing all numbered versions +# of the commit history of a large source file, for use +# as input data for the tests of the diff algorithm. +# +# Run script from root of the x/tools repo. + +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. 
+# file=internal/golang/completion/completion.go +# file=internal/golang/diagnostics.go +file=internal/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go new file mode 100644 index 000000000..504913d1d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go new file mode 100644 index 000000000..4353da15b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 100 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. 
+// The algorithm is one of {forward, backward, twosided}. +func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y 
:= relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. +func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go new file mode 100644 index 000000000..2d72d2630 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go new file mode 100644 index 000000000..f7aa2b79f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "github.com/golangci/golangci-lint/internal/x/tools/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. 
+func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. + res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// isASCII reports whether s contains only ASCII. +func isASCII[S string | []byte](s S) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md new file mode 100644 index 000000000..4b9798498 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md @@ -0,0 +1,8 @@ +# diff + +Extracted from `/internal/diff/` (related to `fixer`). +This is just a copy of the code without any changes. + +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go new file mode 100644 index 000000000..cfbda6102 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go @@ -0,0 +1,251 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// DefaultContextLines is the number of unchanged lines of surrounding +// context displayed by Unified. Use ToUnified to specify a different value. +const DefaultContextLines = 3 + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines) + if err != nil { + // Can't happen: edits are consistent. 
+ log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff, +// with contextLines lines of (unchanged) context around each diff hunk. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits, contextLines) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type hunk struct { + // The line in the original source where the hunk starts. + fromLine int + // The line in the original source where the hunk finishes. + toLine int + // The set of line based edits to apply. + lines []line +} + +// Line represents a single line operation to apply as part of a Hunk. +type line struct { + // kind is the type of line this represents, deletion, insertion or copy. + kind opKind + // content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + content string +} + +// opKind is used to denote the type of operation a line represents. +type opKind int + +const ( + // opDelete is the operation kind for a line that is present in the input + // but not in the output. + opDelete opKind = iota + // opInsert is the operation kind for a line that is new in the output. + opInsert + // opEqual is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + opEqual +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k opKind) String() string { + switch k { + case opDelete: + return "delete" + case opInsert: + return "insert" + case opEqual: + return "equal" + default: + panic("unknown operation kind") + } +} + +// toUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) { + gap := contextLines * 2 + u := unified{ + from: fromName, + to: toName, + } + if len(edits) == 0 { + return u, nil + } + var err error + edits, err = lineEdits(content, edits) // expand to whole lines + if err != nil { + return u, err + } + lines := splitLines(content) + var h *hunk + last := 0 + toLine := 0 + for _, edit := range edits { + // Compute the zero-based line numbers of the edit start and end. + // TODO(adonovan): opt: compute incrementally, avoid O(n^2). 
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + toLine += start - last + h = &hunk{ + fromLine: start + 1, + toLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-contextLines, start) + h.fromLine -= delta + h.toLine -= delta + } + last = start + for i := start; i < end; i++ { + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fromCount++ + case opInsert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.fromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. 
+ fmt.Fprintf(b, " +0,0") + } else { + fmt.Fprintf(b, " +%d", hunk.toLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) + default: + fmt.Fprintf(b, " %s", l.content) + } + if !strings.HasSuffix(l.content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go index 89017e9bf..76e09581c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -1,18 +1,20 @@ package commands import ( + "context" + "encoding/json" "errors" "fmt" "net/http" "os" "path/filepath" + "strconv" "strings" "time" hcversion "github.com/hashicorp/go-version" "github.com/pelletier/go-toml/v2" - "github.com/santhosh-tekuri/jsonschema/v5" - "github.com/santhosh-tekuri/jsonschema/v5/httploader" + "github.com/santhosh-tekuri/jsonschema/v6" "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -43,9 +45,7 @@ func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error { return fmt.Errorf("[%s] validate: %w", usedConfigFile, err) } - detail := v.DetailedOutput() - - printValidationDetail(cmd, &detail) + printValidationDetail(cmd, v.DetailedOutput()) return errors.New("the configuration contains invalid elements") } @@ -70,40 +70,67 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) return "", fmt.Errorf("parse version: %w", err) } - schemaURL = fmt.Sprintf("https://golangci-lint.run/jsonschema/golangci.v%d.%d.jsonschema.json", - version.Segments()[0], version.Segments()[1]) + if version.Core().Equal(hcversion.Must(hcversion.NewVersion("v0.0.0"))) { + commit, err := extractCommitHash(buildInfo) + if err != nil { + return "", err + } - case buildInfo.Commit != "" && buildInfo.Commit != "?": - if buildInfo.Commit == "unknown" { - return "", errors.New("unknown commit information") + return fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", + commit), nil } - commit := buildInfo.Commit - - if strings.HasPrefix(commit, "(") { - c, _, ok := strings.Cut(strings.TrimPrefix(commit, "("), ",") - if !ok { - return "", errors.New("commit information not found") - } + return fmt.Sprintf("https://golangci-lint.run/jsonschema/golangci.v%d.%d.jsonschema.json", + version.Segments()[0], version.Segments()[1]), nil - commit = c + case buildInfo.Commit != "" && buildInfo.Commit != "?": + commit, err := extractCommitHash(buildInfo) + if err != nil { + return "", err } - schemaURL = fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", - commit) + return fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", + commit), nil default: return "", errors.New("version not found") } +} + +func extractCommitHash(buildInfo BuildInfo) (string, error) { + if buildInfo.Commit == "" || buildInfo.Commit == "?" 
{ + return "", errors.New("empty commit information") + } + + if buildInfo.Commit == "unknown" { + return "", errors.New("unknown commit information") + } + + commit := buildInfo.Commit + + if strings.HasPrefix(commit, "(") { + c, _, ok := strings.Cut(strings.TrimPrefix(commit, "("), ",") + if !ok { + return "", errors.New("commit information not found") + } + + commit = c + } + + if commit == "unknown" { + return "", errors.New("unknown commit information") + } - return schemaURL, nil + return commit, nil } func validateConfiguration(schemaPath, targetFile string) error { - httploader.Client = &http.Client{Timeout: 2 * time.Second} - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft7 + compiler.UseLoader(jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "https": newJSONSchemaHTTPLoader(), + }) + compiler.DefaultDraft(jsonschema.Draft7) schema, err := compiler.Compile(schemaPath) if err != nil { @@ -133,10 +160,13 @@ func validateConfiguration(schemaPath, targetFile string) error { return schema.Validate(m) } -func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) { - if detail.Error != "" { +func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { + if detail.Error != nil { + data, _ := json.Marshal(detail.Error) + details, _ := strconv.Unquote(string(data)) + cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n", - strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error) + strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, details) } for _, d := range detail.Errors { @@ -177,3 +207,33 @@ func decodeTomlFile(filename string) (any, error) { return m, nil } + +type jsonschemaHTTPLoader struct { + *http.Client +} + +func newJSONSchemaHTTPLoader() *jsonschemaHTTPLoader { + return &jsonschemaHTTPLoader{Client: &http.Client{ + Timeout: 2 * time.Second, + }} +} + +func (l jsonschemaHTTPLoader) Load(url string) (any, error) { + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) + if err != nil { + return nil, err + } + + resp, err := l.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) + } + + return jsonschema.UnmarshalJSON(resp.Body) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go index 608f6b9de..d514f1271 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go @@ -28,11 +28,11 @@ func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)")) internal.AddHackedStringSliceP(fs, "presets", "p", - color.GreenString(fmt.Sprintf("Enable presets (%s) of linters.\n"+ - "Run 'golangci-lint help linters' to see them.\n"+ + formatList("Enable presets of linters:", lintersdb.AllPresets(), + "Run 'golangci-lint help linters' to see them.", "This option implies option --disable-all", - strings.Join(lintersdb.AllPresets(), "|"), - ))) + ), + ) fs.StringSlice("enable-only", nil, color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only. 
@@ -49,14 +49,13 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version")) internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags")) - internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work")) + internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, + color.GreenString("Timeout for total work. If <= 0, the timeout is disabled")) internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)")) internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip")) internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip")) - internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true, - getDefaultDirectoryExcludeHelp()) const allowParallelDesc = "Allow multiple parallel golangci-lint instances running.\n" + "If false (default) - golangci-lint acquires file lock on start." @@ -69,13 +68,11 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber, - color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|")))) + formatList("Formats of output:", config.AllOutputFormats)) internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true, color.GreenString("Print lines of code with issue")) internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true, color.GreenString("Print linter name in issue line")) - internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true, - color.GreenString("Make issues output unique by line")) internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false, color.GreenString("Sort linter results")) internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil, @@ -97,11 +94,13 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Maximum issues count per one linter. Set to 0 to disable")) internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3, color.GreenString("Maximum count of issues with the same text. 
Set to 0 to disable")) + internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "issues.uniq-by-line", true, + color.GreenString("Make issues output unique by line")) internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude")) internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude")) internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true, - getDefaultDirectoryExcludeHelp()) + formatList("Use or not use default excluded directories:", processors.StdExcludeDirRegexps)) internal.AddFlagAndBind(v, fs, fs.String, "exclude-generated", "issues.exclude-generated", processors.AutogeneratedModeLax, color.GreenString("Mode of the generated files analysis")) @@ -117,12 +116,31 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Show only new issues created after git revision `REV`")) internal.AddFlagAndBind(v, fs, fs.String, "new-from-patch", "issues.new-from-patch", "", color.GreenString("Show only new issues created in git patch with file path `PATH`")) + internal.AddFlagAndBind(v, fs, fs.String, "new-from-merge-base", "issues.new-from-merge-base", "", + color.GreenString("Show only new issues created after the best common ancestor (merge-base against HEAD)")) internal.AddFlagAndBind(v, fs, fs.Bool, "whole-files", "issues.whole-files", false, color.GreenString("Show issues in any part of update files (requires new-from-rev or new-from-patch)")) internal.AddFlagAndBind(v, fs, fs.Bool, "fix", "issues.fix", false, color.GreenString("Fix found issues (if it's supported by the linter)")) } +func formatList(head string, items []string, foot ...string) string { + parts := []string{color.GreenString(head)} + for _, p := range items { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(p))) + } + + for _, s := range foot { + parts = append(parts, color.GreenString(s)) + } + + if len(foot) == 0 { + parts = append(parts, "") + } + + return strings.Join(parts, "\n") +} + func getDefaultIssueExcludeHelp() string { parts := []string{color.GreenString("Use or not use default excludes:")} @@ -135,12 +153,3 @@ func getDefaultIssueExcludeHelp() string { return strings.Join(parts, "\n") } - -func getDefaultDirectoryExcludeHelp() string { - parts := []string{color.GreenString("Use or not use default excluded directories:")} - for _, dir := range processors.StdExcludeDirRegexps { - parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) - } - parts = append(parts, "") - return strings.Join(parts, "\n") -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index 094e5d190..de4a9998f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -1,10 +1,13 @@ package commands import ( + "encoding/json" "fmt" "slices" "sort" "strings" + "unicode" + "unicode/utf8" "github.com/fatih/color" "github.com/spf13/cobra" @@ -15,9 +18,27 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +type linterHelp struct { + Name string `json:"name"` + Desc string `json:"description"` + Fast bool `json:"fast"` + AutoFix bool `json:"autoFix"` + Presets []string `json:"presets"` + EnabledByDefault bool `json:"enabledByDefault"` + Deprecated bool `json:"deprecated"` + Since string `json:"since"` + OriginalURL string `json:"originalURL,omitempty"` +} + +type helpOptions 
struct { + JSON bool +} + type helpCommand struct { cmd *cobra.Command + opts helpOptions + dbManager *lintersdb.Manager log logutils.Log @@ -35,16 +56,21 @@ func newHelpCommand(logger logutils.Log) *helpCommand { }, } - helpCmd.AddCommand( - &cobra.Command{ - Use: "linters", - Short: "Help about linters", - Args: cobra.NoArgs, - ValidArgsFunction: cobra.NoFileCompletions, - Run: c.execute, - PreRunE: c.preRunE, - }, - ) + lintersCmd := &cobra.Command{ + Use: "linters", + Short: "Help about linters", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: c.execute, + PreRunE: c.preRunE, + } + + helpCmd.AddCommand(lintersCmd) + + fs := lintersCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + fs.BoolVar(&c.opts.JSON, "json", false, color.GreenString("Display as JSON")) c.cmd = helpCmd @@ -64,7 +90,41 @@ func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error { return nil } -func (c *helpCommand) execute(_ *cobra.Command, _ []string) { +func (c *helpCommand) execute(_ *cobra.Command, _ []string) error { + if c.opts.JSON { + return c.printJSON() + } + + c.print() + + return nil +} + +func (c *helpCommand) printJSON() error { + var linters []linterHelp + + for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + + linters = append(linters, linterHelp{ + Name: lc.Name(), + Desc: formatDescription(lc.Linter.Desc()), + Fast: !lc.IsSlowLinter(), + AutoFix: lc.CanAutoFix, + Presets: lc.InPresets, + EnabledByDefault: lc.EnabledByDefault, + Deprecated: lc.IsDeprecated(), + Since: lc.Since, + OriginalURL: lc.OriginalURL, + }) + } + + return json.NewEncoder(c.cmd.OutOrStdout()).Encode(linters) +} + +func (c *helpCommand) print() { var enabledLCs, disabledLCs []*linter.Config for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { if lc.Internal { @@ -124,19 +184,52 @@ func printLinters(lcs []*linter.Config) { }) for _, lc := range lcs { - // If the linter description spans multiple lines, truncate everything following the first newline - linterDescription := lc.Linter.Desc() - firstNewline := strings.IndexRune(linterDescription, '\n') - if firstNewline > 0 { - linterDescription = linterDescription[:firstNewline] - } + desc := formatDescription(lc.Linter.Desc()) deprecatedMark := "" if lc.IsDeprecated() { deprecatedMark = " [" + color.RedString("deprecated") + "]" } - _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", - color.YellowString(lc.Name()), deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) + var capabilities []string + if !lc.IsSlowLinter() { + capabilities = append(capabilities, color.BlueString("fast")) + } + if lc.CanAutoFix { + capabilities = append(capabilities, color.GreenString("auto-fix")) + } + + var capability string + if capabilities != nil { + capability = " [" + strings.Join(capabilities, ", ") + "]" + } + + _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s%s\n", + color.YellowString(lc.Name()), deprecatedMark, desc, capability) } } + +func formatDescription(desc string) string { + desc = strings.TrimSpace(desc) + + if desc == "" { + return desc + } + + // If the linter description spans multiple lines, truncate everything following the first newline + endFirstLine := strings.IndexRune(desc, '\n') + if endFirstLine > 0 { + desc = desc[:endFirstLine] + } + + rawDesc := []rune(desc) + + r, _ := utf8.DecodeRuneInString(desc) + rawDesc[0] = unicode.ToUpper(r) + + if rawDesc[len(rawDesc)-1] != '.' 
{ + rawDesc = append(rawDesc, '.') + } + + return string(rawDesc) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index ff7c5e467..57f3cdd99 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -8,12 +8,13 @@ import ( "fmt" "io" "log" + "maps" "os" "path/filepath" "runtime" "runtime/pprof" "runtime/trace" - "sort" + "slices" "strconv" "strings" "time" @@ -24,7 +25,6 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" "go.uber.org/automaxprocs/maxprocs" - "golang.org/x/exp/maps" "gopkg.in/yaml.v3" "github.com/golangci/golangci-lint/internal/cache" @@ -186,6 +186,10 @@ func (c *runCommand) persistentPostRunE(_ *cobra.Command, _ []string) error { } func (c *runCommand) preRunE(_ *cobra.Command, args []string) error { + if c.cfg.GetConfigDir() != "" && c.cfg.Version != "" { + return errors.New("you are using a configuration file for golangci-lint v2 with golangci-lint v1: please use golangci-lint v2") + } + dbManager, err := lintersdb.NewManager(c.log.Child(logutils.DebugKeyLintersDB), c.cfg, lintersdb.NewLinterBuilder(), lintersdb.NewPluginModuleBuilder(c.log), lintersdb.NewPluginGoBuilder(c.log)) if err != nil { @@ -194,7 +198,7 @@ func (c *runCommand) preRunE(_ *cobra.Command, args []string) error { c.dbManager = dbManager - printer, err := printers.NewPrinter(c.log, &c.cfg.Output, c.reportData) + printer, err := printers.NewPrinter(c.log, &c.cfg.Output, c.reportData, c.cfg.GetBasePath()) if err != nil { return err } @@ -217,7 +221,7 @@ func (c *runCommand) preRunE(_ *cobra.Command, args []string) error { pkgLoader := lint.NewPackageLoader(c.log.Child(logutils.DebugKeyLoader), c.cfg, args, c.goenv, guard) - c.contextBuilder = lint.NewContextBuilder(c.cfg, pkgLoader, c.fileCache, pkgCache, guard) + c.contextBuilder = lint.NewContextBuilder(c.cfg, pkgLoader, pkgCache, guard) if err = initHashSalt(c.buildInfo.Version, c.cfg); err != nil { return fmt.Errorf("failed to init hash salt: %w", err) @@ -238,14 +242,21 @@ func (c *runCommand) execute(_ *cobra.Command, args []string) { needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage trackResourcesEndCh := make(chan struct{}) - defer func() { // XXX: this defer must be before ctx.cancel defer - if needTrackResources { // wait until resource tracking finished to print properly + + // Note: this defer must be before ctx.cancel defer + defer func() { + // wait until resource tracking finished to print properly + if needTrackResources { <-trackResourcesEndCh } }() - ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout) - defer cancel() + ctx := context.Background() + if c.cfg.Run.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.cfg.Run.Timeout) + defer cancel() + } if needTrackResources { go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf) @@ -445,8 +456,7 @@ func (c *runCommand) printStats(issues []result.Issue) { c.cmd.Printf("%d issues:\n", len(issues)) - keys := maps.Keys(stats) - sort.Strings(keys) + keys := slices.Sorted(maps.Keys(stats)) for _, key := range keys { c.cmd.Printf("* %s: %d\n", key, stats[key]) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/config/base_rule.go new file mode 100644 index 000000000..780c60cd2 --- /dev/null +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/config/base_rule.go @@ -0,0 +1,75 @@ +package config + +import ( + "errors" + "fmt" + "regexp" +) + +type BaseRule struct { + Linters []string + Path string + PathExcept string `mapstructure:"path-except"` + Text string + Source string + + // For compatibility with exclude-use-default/include. + InternalReference string `mapstructure:"-"` +} + +func (b *BaseRule) Validate(minConditionsCount int) error { + if err := validateOptionalRegex(b.Path); err != nil { + return fmt.Errorf("invalid path regex: %w", err) + } + + if err := validateOptionalRegex(b.PathExcept); err != nil { + return fmt.Errorf("invalid path-except regex: %w", err) + } + + if err := validateOptionalRegex(b.Text); err != nil { + return fmt.Errorf("invalid text regex: %w", err) + } + + if err := validateOptionalRegex(b.Source); err != nil { + return fmt.Errorf("invalid source regex: %w", err) + } + + if b.Path != "" && b.PathExcept != "" { + return errors.New("path and path-except should not be set at the same time") + } + + nonBlank := 0 + if len(b.Linters) > 0 { + nonBlank++ + } + + // Filtering by path counts as one condition, regardless how it is done (one or both). + // Otherwise, a rule with Path and PathExcept set would pass validation + // whereas before the introduction of path-except that wouldn't have been precise enough. + if b.Path != "" || b.PathExcept != "" { + nonBlank++ + } + + if b.Text != "" { + nonBlank++ + } + + if b.Source != "" { + nonBlank++ + } + + if nonBlank < minConditionsCount { + return fmt.Errorf("at least %d of (text, source, path[-except], linters) should be set", minConditionsCount) + } + + return nil +} + +func validateOptionalRegex(value string) error { + if value == "" { + return nil + } + + _, err := regexp.Compile(value) + return err +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index b863b329f..ee7a62b7e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -1,16 +1,26 @@ package config import ( + "cmp" + "context" + "fmt" "os" + "path/filepath" + "slices" "strings" hcversion "github.com/hashicorp/go-version" - "github.com/ldez/gomoddirectives" + "github.com/ldez/grignotin/goenv" + "github.com/ldez/grignotin/gomod" + "golang.org/x/mod/modfile" ) // Config encapsulates the config data specified in the golangci-lint YAML config file. type Config struct { - cfgDir string // The directory containing the golangci-lint config file. + cfgDir string // Path to the directory containing golangci-lint config file. + basePath string // Path the root directory related to [Run.RelativePathMode]. + + Version string `mapstructure:"version"` // From v2, to be able to detect v2 config file. Run Run `mapstructure:"run"` @@ -25,11 +35,15 @@ type Config struct { InternalTest bool // Option is used only for testing golangci-lint code, don't use it } -// GetConfigDir returns the directory that contains golangci config file. +// GetConfigDir returns the directory that contains golangci-lint config file. 
func (c *Config) GetConfigDir() string { return c.cfgDir } +func (c *Config) GetBasePath() string { + return c.basePath +} + func (c *Config) Validate() error { validators := []func() error{ c.Run.Validate, @@ -74,39 +88,94 @@ func IsGoGreaterThanOrEqual(current, limit string) bool { return v1.GreaterThanOrEqual(l) } -func detectGoVersion() string { - goVersion := detectGoVersionFromGoMod() - if goVersion != "" { - return goVersion - } - - v := os.Getenv("GOVERSION") - if v != "" { - return v - } - - return "1.17" +func detectGoVersion(ctx context.Context) string { + return cmp.Or(detectGoVersionFromGoMod(ctx), "1.17") } // detectGoVersionFromGoMod tries to get Go version from go.mod. // It returns `toolchain` version if present, // else it returns `go` version if present, +// else it returns `GOVERSION` version if present, // else it returns empty. -func detectGoVersionFromGoMod() string { - file, _ := gomoddirectives.GetModuleFile() - if file == nil { - return "" +func detectGoVersionFromGoMod(ctx context.Context) string { + values, err := goenv.Get(ctx, goenv.GOMOD, goenv.GOVERSION) + if err != nil { + values = map[string]string{ + goenv.GOMOD: detectGoModFallback(ctx), + } + } + + if values[goenv.GOMOD] == "" { + return parseGoVersion(values[goenv.GOVERSION]) + } + + file, err := parseGoMod(values[goenv.GOMOD]) + if err != nil { + return parseGoVersion(values[goenv.GOVERSION]) } // The toolchain exists only if 'toolchain' version > 'go' version. // If 'toolchain' version <= 'go' version, `go mod tidy` will remove 'toolchain' version from go.mod. if file.Toolchain != nil && file.Toolchain.Name != "" { - return strings.TrimPrefix(file.Toolchain.Name, "go") + return parseGoVersion(file.Toolchain.Name) } if file.Go != nil && file.Go.Version != "" { return file.Go.Version } - return "" + return parseGoVersion(values[goenv.GOVERSION]) +} + +func parseGoVersion(v string) string { + raw := strings.TrimPrefix(v, "go") + + // prerelease version (ex: go1.24rc1) + idx := strings.IndexFunc(raw, func(r rune) bool { + return (r < '0' || r > '9') && r != '.' 
+ }) + + if idx != -1 { + raw = raw[:idx] + } + + return raw +} + +func parseGoMod(goMod string) (*modfile.File, error) { + raw, err := os.ReadFile(filepath.Clean(goMod)) + if err != nil { + return nil, fmt.Errorf("reading go.mod file: %w", err) + } + + return modfile.Parse("go.mod", raw, nil) +} + +func detectGoModFallback(ctx context.Context) string { + info, err := gomod.GetModuleInfo(ctx) + if err != nil { + return "" + } + + wd, err := os.Getwd() + if err != nil { + return "" + } + + slices.SortFunc(info, func(a, b gomod.ModInfo) int { + return cmp.Compare(len(b.Path), len(a.Path)) + }) + + goMod := info[0] + for _, m := range info { + if !strings.HasPrefix(wd, m.Dir) { + continue + } + + goMod = m + + break + } + + return goMod.GoMod } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go index 2ee9364aa..d5b6650f9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -1,13 +1,9 @@ package config import ( - "errors" "fmt" - "regexp" ) -const excludeRuleMinConditionsCount = 2 - var DefaultExcludePatterns = []ExcludePattern{ { ID: "EXC0001", @@ -117,17 +113,19 @@ type Issues struct { UseDefaultExcludeDirs bool `mapstructure:"exclude-dirs-use-default"` - MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` - MaxSameIssues int `mapstructure:"max-same-issues"` + MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` + MaxSameIssues int `mapstructure:"max-same-issues"` + UniqByLine bool `mapstructure:"uniq-by-line"` DiffFromRevision string `mapstructure:"new-from-rev"` + DiffFromMergeBase string `mapstructure:"new-from-merge-base"` DiffPatchFilePath string `mapstructure:"new-from-patch"` WholeFiles bool `mapstructure:"whole-files"` Diff bool `mapstructure:"new"` NeedFix bool `mapstructure:"fix"` - ExcludeGeneratedStrict bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. + ExcludeGeneratedStrict *bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. } func (i *Issues) Validate() error { @@ -140,79 +138,6 @@ func (i *Issues) Validate() error { return nil } -type ExcludeRule struct { - BaseRule `mapstructure:",squash"` -} - -func (e *ExcludeRule) Validate() error { - return e.BaseRule.Validate(excludeRuleMinConditionsCount) -} - -type BaseRule struct { - Linters []string - Path string - PathExcept string `mapstructure:"path-except"` - Text string - Source string -} - -func (b *BaseRule) Validate(minConditionsCount int) error { - if err := validateOptionalRegex(b.Path); err != nil { - return fmt.Errorf("invalid path regex: %w", err) - } - - if err := validateOptionalRegex(b.PathExcept); err != nil { - return fmt.Errorf("invalid path-except regex: %w", err) - } - - if err := validateOptionalRegex(b.Text); err != nil { - return fmt.Errorf("invalid text regex: %w", err) - } - - if err := validateOptionalRegex(b.Source); err != nil { - return fmt.Errorf("invalid source regex: %w", err) - } - - if b.Path != "" && b.PathExcept != "" { - return errors.New("path and path-except should not be set at the same time") - } - - nonBlank := 0 - if len(b.Linters) > 0 { - nonBlank++ - } - - // Filtering by path counts as one condition, regardless how it is done (one or both). - // Otherwise, a rule with Path and PathExcept set would pass validation - // whereas before the introduction of path-except that wouldn't have been precise enough. 
- if b.Path != "" || b.PathExcept != "" { - nonBlank++ - } - - if b.Text != "" { - nonBlank++ - } - - if b.Source != "" { - nonBlank++ - } - - if nonBlank < minConditionsCount { - return fmt.Errorf("at least %d of (text, source, path[-except], linters) should be set", minConditionsCount) - } - - return nil -} - -func validateOptionalRegex(value string) error { - if value == "" { - return nil - } - - _, err := regexp.Compile(value) - return err -} - type ExcludePattern struct { ID string Pattern string @@ -220,14 +145,6 @@ type ExcludePattern struct { Why string } -func GetDefaultExcludePatternsStrings() []string { - ret := make([]string, len(DefaultExcludePatterns)) - for i, p := range DefaultExcludePatterns { - ret[i] = p.Pattern - } - return ret -} - // TODO(ldez): this behavior must be changed in v2, because this is confusing. func GetExcludePatterns(include []string) []ExcludePattern { includeMap := make(map[string]struct{}, len(include)) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go index 5c2628272..4814d1eb2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters.go @@ -13,15 +13,21 @@ type Linters struct { Fast bool Presets []string + + LinterExclusions LinterExclusions `mapstructure:"exclusions"` } func (l *Linters) Validate() error { - if err := l.validateAllDisableEnableOptions(); err != nil { - return err + validators := []func() error{ + l.validateAllDisableEnableOptions, + l.validateDisabledAndEnabledAtOneMoment, + l.LinterExclusions.Validate, } - if err := l.validateDisabledAndEnabledAtOneMoment(); err != nil { - return err + for _, v := range validators { + if err := v(); err != nil { + return err + } } return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_exclusions.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_exclusions.go new file mode 100644 index 000000000..3bed6dfc1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_exclusions.go @@ -0,0 +1,55 @@ +package config + +import ( + "fmt" + "slices" +) + +const ( + ExclusionPresetComments = "comments" + ExclusionPresetStdErrorHandling = "stdErrorHandling" + ExclusionPresetCommonFalsePositives = "commonFalsePositives" + ExclusionPresetLegacy = "legacy" +) + +const excludeRuleMinConditionsCount = 2 + +type LinterExclusions struct { + Generated string `mapstructure:"generated"` + WarnUnused bool `mapstructure:"warn-unused"` + Presets []string `mapstructure:"preset"` + Rules []ExcludeRule `mapstructure:"rules"` + Paths []string `mapstructure:"paths"` + PathsExcept []string `mapstructure:"paths-except"` +} + +func (e *LinterExclusions) Validate() error { + for i, rule := range e.Rules { + if err := rule.Validate(); err != nil { + return fmt.Errorf("error in exclude rule #%d: %w", i, err) + } + } + + allPresets := []string{ + ExclusionPresetComments, + ExclusionPresetStdErrorHandling, + ExclusionPresetCommonFalsePositives, + ExclusionPresetLegacy, + } + + for _, preset := range e.Presets { + if !slices.Contains(allPresets, preset) { + return fmt.Errorf("invalid preset: %s", preset) + } + } + + return nil +} + +type ExcludeRule struct { + BaseRule `mapstructure:",squash"` +} + +func (e *ExcludeRule) Validate() error { + return e.BaseRule.Validate(excludeRuleMinConditionsCount) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go 
b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go index b182d1e0f..94650a66d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go @@ -105,6 +105,7 @@ var defaultLintersSettings = LintersSettings{ Kitlog: true, Klog: true, Logr: true, + Slog: true, Zap: true, RequireStringKey: false, NoPrintfLike: false, @@ -125,11 +126,16 @@ var defaultLintersSettings = LintersSettings{ AllowUnused: false, }, PerfSprint: PerfSprintSettings{ + IntegerFormat: true, IntConversion: true, + ErrorFormat: true, ErrError: false, ErrorF: true, + StringFormat: true, SprintF1: true, StrConcat: true, + BoolFormat: true, + HexFormat: true, }, Prealloc: PreallocSettings{ Simple: true, @@ -168,7 +174,6 @@ var defaultLintersSettings = LintersSettings{ Unused: UnusedSettings{ FieldWritesAreUses: true, PostStatementsAreReads: false, - ExportedIsUsed: true, ExportedFieldsAreUsed: true, ParametersAreUsed: true, LocalVariablesAreUsed: true, @@ -178,6 +183,15 @@ var defaultLintersSettings = LintersSettings{ HTTPMethod: true, HTTPStatusCode: true, }, + UseTesting: UseTestingSettings{ + ContextBackground: true, + ContextTodo: true, + OSChdir: true, + OSMkdirTemp: true, + OSSetenv: true, + OSTempDir: false, + OSCreateTemp: true, + }, Varnamelen: VarnamelenSettings{ MaxDistance: 5, MinNameLength: 3, @@ -214,6 +228,7 @@ type LintersSettings struct { ErrorLint ErrorLintSettings Exhaustive ExhaustiveSettings Exhaustruct ExhaustructSettings + Fatcontext FatcontextSettings Forbidigo ForbidigoSettings Funlen FunlenSettings Gci GciSettings @@ -261,6 +276,7 @@ type LintersSettings struct { Promlinter PromlinterSettings ProtoGetter ProtoGetterSettings Reassign ReassignSettings + Recvcheck RecvcheckSettings Revive ReviveSettings RowsErrCheck RowsErrCheckSettings SlogLint SlogLintSettings @@ -277,6 +293,7 @@ type LintersSettings struct { Unparam UnparamSettings Unused UnusedSettings UseStdlibVars UseStdlibVarsSettings + UseTesting UseTestingSettings Varnamelen VarnamelenSettings Whitespace WhitespaceSettings Wrapcheck WrapcheckSettings @@ -318,8 +335,10 @@ type BiDiChkSettings struct { } type CopyLoopVarSettings struct { - IgnoreAlias bool `mapstructure:"ignore-alias"` // Deprecated: use CheckAlias - CheckAlias bool `mapstructure:"check-alias"` + CheckAlias bool `mapstructure:"check-alias"` + + // Deprecated: use CheckAlias + IgnoreAlias *bool `mapstructure:"ignore-alias"` } type Cyclop struct { @@ -417,6 +436,10 @@ type ExhaustructSettings struct { Exclude []string `mapstructure:"exclude"` } +type FatcontextSettings struct { + CheckStructPointers bool `mapstructure:"check-struct-pointers"` +} + type ForbidigoSettings struct { Forbid []ForbidigoPattern `mapstructure:"forbid"` ExcludeGodocExamples bool `mapstructure:"exclude-godoc-examples"` @@ -466,10 +489,12 @@ type FunlenSettings struct { } type GciSettings struct { - Sections []string `mapstructure:"sections"` - SkipGenerated bool `mapstructure:"skip-generated"` - CustomOrder bool `mapstructure:"custom-order"` - NoLexOrder bool `mapstructure:"no-lex-order"` + Sections []string `mapstructure:"sections"` + NoInlineComments bool `mapstructure:"no-inline-comments"` + NoPrefixComments bool `mapstructure:"no-prefix-comments"` + SkipGenerated bool `mapstructure:"skip-generated"` + CustomOrder bool `mapstructure:"custom-order"` + NoLexOrder bool `mapstructure:"no-lex-order"` // Deprecated: use Sections instead. 
LocalPrefixes string `mapstructure:"local-prefixes"` @@ -492,6 +517,7 @@ type GinkgoLinterSettings struct { type GoChecksumTypeSettings struct { DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + IncludeSharedInterfaces bool `mapstructure:"include-shared-interfaces"` } type GocognitSettings struct { @@ -534,7 +560,7 @@ type GodotSettings struct { Period bool `mapstructure:"period"` // Deprecated: use Scope instead - CheckAll bool `mapstructure:"check-all"` + CheckAll *bool `mapstructure:"check-all"` } type GodoxSettings struct { @@ -574,6 +600,11 @@ type GoModDirectivesSettings struct { ReplaceLocal bool `mapstructure:"replace-local"` ExcludeForbidden bool `mapstructure:"exclude-forbidden"` RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"` + ToolchainForbidden bool `mapstructure:"toolchain-forbidden"` + ToolchainPattern string `mapstructure:"toolchain-pattern"` + ToolForbidden bool `mapstructure:"tool-forbidden"` + GoDebugForbidden bool `mapstructure:"go-debug-forbidden"` + GoVersionPattern string `mapstructure:"go-version-pattern"` } type GoModGuardSettings struct { @@ -622,7 +653,7 @@ type GovetSettings struct { Settings map[string]map[string]any // Deprecated: the linter should be enabled inside Enable. - CheckShadowing bool `mapstructure:"check-shadowing"` + CheckShadowing *bool `mapstructure:"check-shadowing"` } func (cfg *GovetSettings) Validate() error { @@ -687,6 +718,7 @@ type LoggerCheckSettings struct { Kitlog bool `mapstructure:"kitlog"` Klog bool `mapstructure:"klog"` Logr bool `mapstructure:"logr"` + Slog bool `mapstructure:"slog"` Zap bool `mapstructure:"zap"` RequireStringKey bool `mapstructure:"require-string-key"` NoPrintfLike bool `mapstructure:"no-printf-like"` @@ -764,11 +796,19 @@ type ParallelTestSettings struct { } type PerfSprintSettings struct { + IntegerFormat bool `mapstructure:"integer-format"` IntConversion bool `mapstructure:"int-conversion"` - ErrError bool `mapstructure:"err-error"` - ErrorF bool `mapstructure:"errorf"` - SprintF1 bool `mapstructure:"sprintf1"` - StrConcat bool `mapstructure:"strconcat"` + + ErrorFormat bool `mapstructure:"error-format"` + ErrError bool `mapstructure:"err-error"` + ErrorF bool `mapstructure:"errorf"` + + StringFormat bool `mapstructure:"string-format"` + SprintF1 bool `mapstructure:"sprintf1"` + StrConcat bool `mapstructure:"strconcat"` + + BoolFormat bool `mapstructure:"bool-format"` + HexFormat bool `mapstructure:"hex-format"` } type PreallocSettings struct { @@ -798,6 +838,11 @@ type ReassignSettings struct { Patterns []string `mapstructure:"patterns"` } +type RecvcheckSettings struct { + DisableBuiltin bool `mapstructure:"disable-builtin"` + Exclusions []string `mapstructure:"exclusions"` +} + type ReviveSettings struct { Go string `mapstructure:"-"` MaxOpenFiles int `mapstructure:"max-open-files"` @@ -837,7 +882,7 @@ type SlogLintSettings struct { ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"` // Deprecated: use Context instead. 
- ContextOnly bool `mapstructure:"context-only"` + ContextOnly *bool `mapstructure:"context-only"` } type SpancheckSettings struct { @@ -868,10 +913,31 @@ type TagAlignSettings struct { } type TagliatelleSettings struct { - Case struct { - Rules map[string]string - UseFieldName bool `mapstructure:"use-field-name"` - } + Case TagliatelleCase +} + +type TagliatelleCase struct { + TagliatelleBase `mapstructure:",squash"` + Overrides []TagliatelleOverrides +} + +type TagliatelleOverrides struct { + TagliatelleBase `mapstructure:",squash"` + Package string `mapstructure:"pkg"` + Ignore bool `mapstructure:"ignore"` +} + +type TagliatelleBase struct { + Rules map[string]string `mapstructure:"rules"` + ExtendedRules map[string]TagliatelleExtendedRule `mapstructure:"extended-rules"` + UseFieldName bool `mapstructure:"use-field-name"` + IgnoredFields []string `mapstructure:"ignored-fields"` +} + +type TagliatelleExtendedRule struct { + Case string + ExtraInitialisms bool + InitialismOverrides map[string]bool } type TestifylintSettings struct { @@ -936,11 +1002,24 @@ type UseStdlibVarsSettings struct { TimeLayout bool `mapstructure:"time-layout"` CryptoHash bool `mapstructure:"crypto-hash"` DefaultRPCPath bool `mapstructure:"default-rpc-path"` - OSDevNull bool `mapstructure:"os-dev-null"` // Deprecated SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` ConstantKind bool `mapstructure:"constant-kind"` - SyslogPriority bool `mapstructure:"syslog-priority"` // Deprecated + + // Deprecated + OSDevNull *bool `mapstructure:"os-dev-null"` + // Deprecated + SyslogPriority *bool `mapstructure:"syslog-priority"` +} + +type UseTestingSettings struct { + ContextBackground bool `mapstructure:"context-background"` + ContextTodo bool `mapstructure:"context-todo"` + OSChdir bool `mapstructure:"os-chdir"` + OSMkdirTemp bool `mapstructure:"os-mkdir-temp"` + OSSetenv bool `mapstructure:"os-setenv"` + OSTempDir bool `mapstructure:"os-temp-dir"` + OSCreateTemp bool `mapstructure:"os-create-temp"` } type UnconvertSettings struct { @@ -956,11 +1035,13 @@ type UnparamSettings struct { type UnusedSettings struct { FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"` PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"` - ExportedIsUsed bool `mapstructure:"exported-is-used"` // Deprecated ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"` ParametersAreUsed bool `mapstructure:"parameters-are-used"` LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"` GeneratedIsUsed bool `mapstructure:"generated-is-used"` + + // Deprecated + ExportedIsUsed *bool `mapstructure:"exported-is-used"` } type VarnamelenSettings struct { @@ -982,6 +1063,7 @@ type WhitespaceSettings struct { } type WrapcheckSettings struct { + ExtraIgnoreSigs []string `mapstructure:"extra-ignore-sigs"` // TODO(ldez): v2 the options must be renamed to use hyphen. 
IgnoreSigs []string `mapstructure:"ignoreSigs"` IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"` diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go index efeed3ca4..dc9ceeadd 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go @@ -1,6 +1,8 @@ package config import ( + "cmp" + "context" "errors" "fmt" "os" @@ -66,6 +68,26 @@ func (l *Loader) Load(opts LoadOptions) error { l.applyStringSliceHack() + if l.cfg.Linters.LinterExclusions.Generated == "" { + // `l.cfg.Issues.ExcludeGenerated` is always non-empty because of the flag default value. + l.cfg.Linters.LinterExclusions.Generated = cmp.Or(l.cfg.Issues.ExcludeGenerated, "strict") + } + + // Compatibility layer with v1. + // TODO(ldez): should be removed in v2. + if l.cfg.Issues.UseDefaultExcludes { + l.cfg.Linters.LinterExclusions.Presets = []string{ + ExclusionPresetComments, + ExclusionPresetStdErrorHandling, + ExclusionPresetCommonFalsePositives, + ExclusionPresetLegacy, + } + } + + if len(l.cfg.Issues.ExcludeRules) > 0 { + l.cfg.Linters.LinterExclusions.Rules = append(l.cfg.Linters.LinterExclusions.Rules, l.cfg.Issues.ExcludeRules...) + } + if opts.CheckDeprecation { err = l.handleDeprecation() if err != nil { @@ -80,6 +102,11 @@ func (l *Loader) Load(opts LoadOptions) error { return err } + l.cfg.basePath, err = fsutils.GetBasePath(context.Background(), l.cfg.Run.RelativePathMode, l.cfg.cfgDir) + if err != nil { + return fmt.Errorf("get base path: %w", err) + } + err = l.handleEnableOnlyOption() if err != nil { return err @@ -285,16 +312,14 @@ func (l *Loader) appendStringSlice(name string, current *[]string) { func (l *Loader) handleGoVersion() { if l.cfg.Run.Go == "" { - l.cfg.Run.Go = detectGoVersion() + l.cfg.Run.Go = detectGoVersion(context.Background()) } l.cfg.LintersSettings.Govet.Go = l.cfg.Run.Go l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go - if l.cfg.LintersSettings.Gofumpt.LangVersion == "" { - l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go - } + l.cfg.LintersSettings.Gofumpt.LangVersion = cmp.Or(l.cfg.LintersSettings.Gofumpt.LangVersion, l.cfg.Run.Go) trimmedGoVersion := goutil.TrimGoVersion(l.cfg.Run.Go) @@ -322,19 +347,23 @@ func (l *Loader) handleDeprecation() error { l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs } - // The 2 options are true by default. // Deprecated since v1.57.0 - if !l.cfg.Run.UseDefaultSkipDirs { + if l.cfg.Run.UseDefaultSkipDirs != nil { l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.") + l.cfg.Issues.UseDefaultExcludeDirs = *l.cfg.Run.UseDefaultSkipDirs } - l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs - // The 2 options are false by default. 
// Deprecated since v1.57.0 - if l.cfg.Run.ShowStats { + if l.cfg.Run.ShowStats != nil { l.log.Warnf("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`") + l.cfg.Output.ShowStats = *l.cfg.Run.ShowStats + } + + // Deprecated since v1.63.0 + if l.cfg.Output.UniqByLine != nil { + l.log.Warnf("The configuration option `output.uniq-by-line` is deprecated, please use `issues.uniq-by-line`") + l.cfg.Issues.UniqByLine = *l.cfg.Output.UniqByLine } - l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats // Deprecated since v1.57.0 if l.cfg.Output.Format != "" { @@ -357,9 +386,11 @@ func (l *Loader) handleDeprecation() error { } // Deprecated since v1.59.0 - if l.cfg.Issues.ExcludeGeneratedStrict { + if l.cfg.Issues.ExcludeGeneratedStrict != nil { l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`") - l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. + if !*l.cfg.Issues.ExcludeGeneratedStrict { + l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. + } } l.handleLinterOptionDeprecations() @@ -367,16 +398,15 @@ func (l *Loader) handleDeprecation() error { return nil } -//nolint:gocyclo // the complexity cannot be reduced. func (l *Loader) handleLinterOptionDeprecations() { // Deprecated since v1.57.0, // but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697). - if l.cfg.LintersSettings.Govet.CheckShadowing { + if l.cfg.LintersSettings.Govet.CheckShadowing != nil { l.log.Warnf("The configuration option `linters.govet.check-shadowing` is deprecated. " + "Please enable `shadow` instead, if you are not using `enable-all`.") } - if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias { + if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias != nil { l.log.Warnf("The configuration option `linters.copyloopvar.ignore-alias` is deprecated and ignored," + "please use `linters.copyloopvar.check-alias`.") } @@ -398,7 +428,7 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.33.0. 
- if l.cfg.LintersSettings.Godot.CheckAll { + if l.cfg.LintersSettings.Godot.CheckAll != nil { l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.") } @@ -423,25 +453,23 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.60.0 - if !l.cfg.LintersSettings.Unused.ExportedIsUsed { + if l.cfg.LintersSettings.Unused.ExportedIsUsed != nil { l.log.Warnf("The configuration option `linters.unused.exported-is-used` is deprecated.") } // Deprecated since v1.58.0 - if l.cfg.LintersSettings.SlogLint.ContextOnly { + if l.cfg.LintersSettings.SlogLint.ContextOnly != nil { l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.") - if l.cfg.LintersSettings.SlogLint.Context == "" { - l.cfg.LintersSettings.SlogLint.Context = "all" - } + l.cfg.LintersSettings.SlogLint.Context = cmp.Or(l.cfg.LintersSettings.SlogLint.Context, "all") } // Deprecated since v1.51.0 - if l.cfg.LintersSettings.UseStdlibVars.OSDevNull { + if l.cfg.LintersSettings.UseStdlibVars.OSDevNull != nil { l.log.Warnf("The configuration option `linters.usestdlibvars.os-dev-null` is deprecated.") } // Deprecated since v1.51.0 - if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority { + if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority != nil { l.log.Warnf("The configuration option `linters.usestdlibvars.syslog-priority` is deprecated.") } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go index 6a26d5773..caddb095c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -16,8 +16,8 @@ const ( OutFormatCheckstyle = "checkstyle" OutFormatCodeClimate = "code-climate" OutFormatHTML = "html" - OutFormatJunitXML = "junit-xml" - OutFormatJunitXMLExtended = "junit-xml-extended" + OutFormatJUnitXML = "junit-xml" + OutFormatJUnitXMLExtended = "junit-xml-extended" OutFormatGithubActions = "github-actions" // Deprecated OutFormatTeamCity = "teamcity" OutFormatSarif = "sarif" @@ -32,8 +32,8 @@ var AllOutputFormats = []string{ OutFormatCheckstyle, OutFormatCodeClimate, OutFormatHTML, - OutFormatJunitXML, - OutFormatJunitXMLExtended, + OutFormatJUnitXML, + OutFormatJUnitXMLExtended, OutFormatGithubActions, OutFormatTeamCity, OutFormatSarif, @@ -43,7 +43,6 @@ type Output struct { Formats OutputFormats `mapstructure:"formats"` PrintIssuedLine bool `mapstructure:"print-issued-lines"` PrintLinterName bool `mapstructure:"print-linter-name"` - UniqByLine bool `mapstructure:"uniq-by-line"` SortResults bool `mapstructure:"sort-results"` SortOrder []string `mapstructure:"sort-order"` PathPrefix string `mapstructure:"path-prefix"` @@ -51,6 +50,9 @@ type Output struct { // Deprecated: use Formats instead. Format string `mapstructure:"format"` + + // Deprecated: use [Issues.UniqByLine] instead. 
+ UniqByLine *bool `mapstructure:"uniq-by-line"` } func (o *Output) Validate() error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go index 2f6523c0b..8e00f1e6d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -5,6 +5,8 @@ import ( "slices" "strings" "time" + + "github.com/golangci/golangci-lint/pkg/fsutils" ) // Run encapsulates the config options for running the linter analysis. @@ -15,6 +17,8 @@ type Run struct { Go string `mapstructure:"go"` + RelativePathMode string `mapstructure:"relative-path-mode"` + BuildTags []string `mapstructure:"build-tags"` ModulesDownloadMode string `mapstructure:"modules-download-mode"` @@ -29,18 +33,24 @@ type Run struct { // Deprecated: use Issues.ExcludeDirs instead. SkipDirs []string `mapstructure:"skip-dirs"` // Deprecated: use Issues.UseDefaultExcludeDirs instead. - UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` + UseDefaultSkipDirs *bool `mapstructure:"skip-dirs-use-default"` // Deprecated: use Output.ShowStats instead. - ShowStats bool `mapstructure:"show-stats"` + ShowStats *bool `mapstructure:"show-stats"` } func (r *Run) Validate() error { // go help modules - allowedMods := []string{"mod", "readonly", "vendor"} + allowedModes := []string{"mod", "readonly", "vendor"} + + if r.ModulesDownloadMode != "" && !slices.Contains(allowedModes, r.ModulesDownloadMode) { + return fmt.Errorf("invalid modules download path %s, only (%s) allowed", r.ModulesDownloadMode, strings.Join(allowedModes, "|")) + } + + pathRelativeToModes := fsutils.AllRelativePathModes() - if r.ModulesDownloadMode != "" && !slices.Contains(allowedMods, r.ModulesDownloadMode) { - return fmt.Errorf("invalid modules download path %s, only (%s) allowed", r.ModulesDownloadMode, strings.Join(allowedMods, "|")) + if r.RelativePathMode != "" && !slices.Contains(pathRelativeToModes, r.RelativePathMode) { + return fmt.Errorf("invalid relative path mode %s, only (%s) allowed", r.RelativePathMode, strings.Join(pathRelativeToModes, "|")) } return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/basepath.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/basepath.go new file mode 100644 index 000000000..97d6aced1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/basepath.go @@ -0,0 +1,77 @@ +package fsutils + +import ( + "bytes" + "context" + "errors" + "fmt" + "os/exec" + "path/filepath" + + "github.com/ldez/grignotin/goenv" +) + +// Relative path modes. +const ( + RelativePathModeGoMod = "gomod" + RelativePathModeGitRoot = "gitroot" + RelativePathModeCfg = "cfg" + RelativePathModeWd = "wd" +) + +func AllRelativePathModes() []string { + return []string{RelativePathModeGoMod, RelativePathModeGitRoot, RelativePathModeCfg, RelativePathModeWd} +} + +func GetBasePath(ctx context.Context, mode, cfgDir string) (string, error) { + if mode == "" { + // TODO(ldez): v2 the default should be cfg or gomod. 
+ mode = RelativePathModeWd + } + + switch mode { + case RelativePathModeCfg: + if cfgDir == "" { + return GetBasePath(ctx, RelativePathModeWd, cfgDir) + } + + return cfgDir, nil + + case RelativePathModeGoMod: + goMod, err := goenv.GetOne(ctx, goenv.GOMOD) + if err != nil { + return "", fmt.Errorf("get go.mod path: %w", err) + } + + return filepath.Dir(goMod), nil + + case RelativePathModeGitRoot: + root, err := gitRoot(ctx) + if err != nil { + return "", fmt.Errorf("get git root: %w", err) + } + + return root, nil + + case RelativePathModeWd: + wd, err := Getwd() + if err != nil { + return "", fmt.Errorf("get wd: %w", err) + } + + return wd, nil + + default: + return "", errors.New("unknown relative path mode") + } +} + +func gitRoot(ctx context.Context) (string, error) { + cmd := exec.CommandContext(ctx, "git", "rev-parse", "--show-toplevel") + out, err := cmd.Output() + if err != nil { + return "", err + } + + return string(bytes.TrimSpace(out)), nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go index 80bb9c5b4..ead18a537 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -34,13 +34,13 @@ func Getwd() (string, error) { return } - evaledWd, err := EvalSymlinks(cachedWd) + evaluatedWd, err := EvalSymlinks(cachedWd) if err != nil { cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %w", cachedWd, err) return } - cachedWd = evaledWd + cachedWd = evaluatedWd }) return cachedWd, cachedWdError @@ -61,7 +61,7 @@ func EvalSymlinks(path string) (string, error) { } var er evalSymlinkRes - er.path, er.err = filepath.EvalSymlinks(path) + er.path, er.err = evalSymlinks(path) evalSymlinkCache.Store(path, er) return er.path, er.err @@ -76,15 +76,15 @@ func ShortestRelPath(path, wd string) (string, error) { } } - evaledPath, err := EvalSymlinks(path) + evaluatedPath, err := EvalSymlinks(path) if err != nil { return "", fmt.Errorf("can't eval symlinks for path %s: %w", path, err) } - path = evaledPath + path = evaluatedPath // make path absolute and then relative to be able to fix this case: - // we are in /test dir, we want to normalize ../test, and have file file.go in this dir; - // it must have normalized path file.go, not ../test/file.go, + // we are in `/test` dir, we want to normalize `../test`, and have file `file.go` in this dir; + // it must have normalized path `file.go`, not `../test/file.go`, var absPath string if filepath.IsAbs(path) { absPath = path diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go new file mode 100644 index 000000000..68e762cf4 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go @@ -0,0 +1,9 @@ +//go:build !windows + +package fsutils + +import "path/filepath" + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go new file mode 100644 index 000000000..19efb1cfc --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go @@ -0,0 +1,39 @@ +//go:build windows + +package fsutils + +import ( + "errors" + "os" + "path/filepath" + "syscall" +) + +// This is a workaround for the behavior of 
[filepath.EvalSymlinks], +// which fails with [syscall.ENOTDIR] if the specified path contains a junction on Windows. +// Junctions can occur, for example, when a volume is mounted as a subdirectory inside another drive. +// This can usually happen when using the Dev Drives feature and replacing existing directories. +// See: https://github.com/golang/go/issues/40180 +// +// Since [syscall.ENOTDIR] is only returned when calling [filepath.EvalSymlinks] on Windows +// if part of the presented path is a junction and nothing before was a symlink, +// we simply treat this as NOT symlink, +// because a symlink over the junction makes no sense at all. +func evalSymlinks(path string) (string, error) { + resolved, err := filepath.EvalSymlinks(path) + if err == nil { + return resolved, nil + } + + if !errors.Is(err, syscall.ENOTDIR) { + return "", err + } + + _, err = os.Stat(path) + if err != nil { + return "", err + } + + // If exists, we make the path absolute, to be sure... + return filepath.Abs(path) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go index 15d8dd2b3..854e7d15f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go @@ -26,7 +26,7 @@ type EncodingIssue struct { Severity string Pos token.Position LineRange *result.Range - Replacement *result.Replacement + SuggestedFixes []analysis.SuggestedFix ExpectNoLint bool ExpectedNoLintLinter string } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go new file mode 100644 index 000000000..28441b341 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go @@ -0,0 +1,50 @@ +package goanalysis + +import ( + "go/ast" + "go/token" + "path/filepath" + + "golang.org/x/tools/go/analysis" +) + +func GetGoFilePosition(pass *analysis.Pass, f *ast.File) (token.Position, bool) { + position := GetFilePositionFor(pass.Fset, f.Pos()) + + if filepath.Ext(position.Filename) == ".go" { + return position, true + } + + return position, false +} + +func GetFilePositionFor(fset *token.FileSet, p token.Pos) token.Position { + pos := fset.PositionFor(p, true) + + ext := filepath.Ext(pos.Filename) + if ext != ".go" { + // position has been adjusted to a non-go file, revert to original file + return fset.PositionFor(p, false) + } + + return pos +} + +func EndOfLinePos(f *token.File, line int) token.Pos { + var end token.Pos + + if line >= f.LineCount() { + // missing newline at the end of the file + end = f.Pos(f.Size()) + } else { + end = f.LineStart(line+1) - token.Pos(1) + } + + return end +} + +// AdjustPos is a hack to get the right line to display. +// It should not be used outside some specific cases. 
+func AdjustPos(line, nonAdjLine, adjLine int) int { + return line + nonAdjLine - adjLine +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go index ac03c71ec..7cff0149a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go @@ -8,11 +8,11 @@ import ( "encoding/gob" "fmt" "go/token" + "maps" "runtime" - "sort" + "slices" "sync" - "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -42,6 +42,7 @@ type Diagnostic struct { Analyzer *analysis.Analyzer Position token.Position Pkg *packages.Package + File *token.File } type runner struct { @@ -121,9 +122,9 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, } act = actAlloc.alloc() - act.a = a - act.pkg = pkg - act.r = r + act.Analyzer = a + act.Package = pkg + act.runner = r act.isInitialPkg = initialPkgs[pkg] act.needAnalyzeSource = initialPkgs[pkg] act.analysisDoneCh = make(chan struct{}) @@ -132,11 +133,11 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, if len(a.FactTypes) > 0 { depsCount += len(pkg.Imports) } - act.deps = make([]*action, 0, depsCount) + act.Deps = make([]*action, 0, depsCount) // Add a dependency on each required analyzers. for _, req := range a.Requires { - act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) + act.Deps = append(act.Deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) } r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc) @@ -158,11 +159,11 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac act.objectFacts = make(map[objectFactKey]analysis.Fact) act.packageFacts = make(map[packageFactKey]analysis.Fact) - paths := maps.Keys(pkg.Imports) - sort.Strings(paths) // for determinism + paths := slices.Sorted(maps.Keys(pkg.Imports)) // for determinism + for _, path := range paths { dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) - act.deps = append(act.deps, dep) + act.Deps = append(act.Deps, dep) } // Need to register fact types for pkgcache proper gob encoding. @@ -203,12 +204,12 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, for _, a := range analyzers { for _, pkg := range pkgs { root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc) - root.isroot = true + root.IsRoot = true roots = append(roots, root) } } - allActions = maps.Values(actions) + allActions = slices.Collect(maps.Values(actions)) debugf("Built %d actions", len(actions)) @@ -220,7 +221,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze actionPerPkg := map[*packages.Package][]*action{} for _, act := range actions { - actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act) + actionPerPkg[act.Package] = append(actionPerPkg[act.Package], act) } // Fill Imports field. @@ -250,7 +251,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze } } for _, act := range actions { - dfs(act.pkg) + dfs(act.Package) } // Limit memory and IO usage. 
@@ -282,7 +283,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err for _, act := range actions { if !extracted[act] { extracted[act] = true - visitAll(act.deps) + visitAll(act.Deps) extract(act) } } @@ -299,31 +300,34 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err seen := make(map[key]bool) extract = func(act *action) { - if act.err != nil { - if pe, ok := act.err.(*errorutil.PanicError); ok { + if act.Err != nil { + if pe, ok := act.Err.(*errorutil.PanicError); ok { panic(pe) } - retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err)) + retErrors = append(retErrors, fmt.Errorf("%s: %w", act.Analyzer.Name, act.Err)) return } - if act.isroot { - for _, diag := range act.diagnostics { + if act.IsRoot { + for _, diag := range act.Diagnostics { // We don't display a.Name/f.Category // as most users don't care. - posn := act.pkg.Fset.Position(diag.Pos) - k := key{posn, act.a, diag.Message} + position := GetFilePositionFor(act.Package.Fset, diag.Pos) + file := act.Package.Fset.File(diag.Pos) + + k := key{Position: position, Analyzer: act.Analyzer, message: diag.Message} if seen[k] { continue // duplicate } seen[k] = true retDiag := Diagnostic{ + File: file, Diagnostic: diag, - Analyzer: act.a, - Position: posn, - Pkg: act.pkg, + Analyzer: act.Analyzer, + Position: position, + Pkg: act.Package, } retDiags = append(retDiags, retDiag) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go index 152cab181..2e1c41422 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go @@ -29,8 +29,8 @@ func (actAlloc *actionAllocator) alloc() *action { } func (act *action) waitUntilDependingAnalyzersWorked() { - for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { <-dep.analysisDoneCh } } @@ -39,26 +39,26 @@ func (act *action) waitUntilDependingAnalyzersWorked() { func (act *action) analyzeSafe() { defer func() { if p := recover(); p != nil { - if !act.isroot { + if !act.IsRoot { // This line allows to display "hidden" panic with analyzers like buildssa. // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, // this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA"). - act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack())) + act.runner.log.Errorf("%s: panic during analysis: %v, %s", act.Analyzer.Name, p, string(debug.Stack())) } - act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", - act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) + act.Err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", + act.Analyzer.Name, act.Package.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } }() - act.r.sw.TrackStage(act.a.Name, act.analyze) + act.runner.sw.TrackStage(act.Analyzer.Name, act.analyze) } func (act *action) markDepsForAnalyzingSource() { // Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing // this action. 
- for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { // Analyze source only for horizontal dependencies, e.g. from "buildssa". dep.needAnalyzeSource = true // can't be set in parallel } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go index fbc2f82fa..e06ea2979 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go @@ -26,7 +26,7 @@ func (act *action) loadCachedFacts() bool { return true // load cached facts only for non-initial packages } - if len(act.a.FactTypes) == 0 { + if len(act.Analyzer.FactTypes) == 0 { return true // no need to load facts } @@ -38,7 +38,7 @@ func (act *action) loadCachedFacts() bool { } func (act *action) persistFactsToCache() error { - analyzer := act.a + analyzer := act.Analyzer if len(analyzer.FactTypes) == 0 { return nil } @@ -46,7 +46,7 @@ func (act *action) persistFactsToCache() error { // Merge new facts into the package and persist them. var facts []Fact for key, fact := range act.packageFacts { - if key.pkg != act.pkg.Types { + if key.pkg != act.Package.Types { // The fact is from inherited facts from another package continue } @@ -57,7 +57,7 @@ func (act *action) persistFactsToCache() error { } for key, fact := range act.objectFacts { obj := key.obj - if obj.Pkg() != act.pkg.Types { + if obj.Pkg() != act.Package.Types { // The fact is from inherited facts from another package continue } @@ -74,33 +74,33 @@ func (act *action) persistFactsToCache() error { }) } - factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name) - return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) + return act.runner.pkgCache.Put(act.Package, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) } func (act *action) loadPersistedFacts() bool { var facts []Fact - err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts) + err := act.runner.pkgCache.Get(act.Package, cache.HashModeNeedAllDeps, factCacheKey(act.Analyzer), &facts) if err != nil { if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) { - act.r.log.Warnf("Failed to get persisted facts: %s", err) + act.runner.log.Warnf("Failed to get persisted facts: %s", err) } - factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) + factsCacheDebugf("No cached facts for package %q and analyzer %s", act.Package.Name, act.Analyzer.Name) return false } - factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name) for _, f := range facts { if f.Path == "" { // this is a package fact - key := packageFactKey{act.pkg.Types, act.factType(f.Fact)} + key := packageFactKey{act.Package.Types, act.factType(f.Fact)} act.packageFacts[key] = f.Fact continue } - obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path)) + obj, err := objectpath.Object(act.Package.Types, objectpath.Path(f.Path)) if err != nil { // Be lenient about these errors. 
For example, when // analyzing io/ioutil from source, we may get a fact diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go similarity index 52% rename from vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go rename to vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go index d868f8f5d..376a37f03 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go +// Altered copy of https://github.com/golang/tools/blob/v0.28.0/go/analysis/internal/checker/checker.go package goanalysis @@ -18,6 +18,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" + "github.com/golangci/golangci-lint/internal/x/tools/analysisflags" + "github.com/golangci/golangci-lint/internal/x/tools/analysisinternal" "github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors" ) @@ -27,19 +29,21 @@ import ( // package (as different analyzers are applied, either in sequence or // parallel), and across packages (as dependencies are analyzed). type action struct { - a *analysis.Analyzer - pkg *packages.Package + Analyzer *analysis.Analyzer + Package *packages.Package + IsRoot bool // whether this is a root node of the graph + Deps []*action + Result any // computed result of Analyzer.run, if any (and if IsRoot) + Err error // error result of Analyzer.run + Diagnostics []analysis.Diagnostic + Duration time.Duration // execution time of this step + pass *analysis.Pass - isroot bool - deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - result any - diagnostics []analysis.Diagnostic - err error // NOTE(ldez) custom fields. - r *runner + runner *runner analysisDoneCh chan struct{} loadCachedFactsDone bool loadCachedFactsOk bool @@ -61,7 +65,7 @@ type packageFactKey struct { // NOTE(ldez) no alteration. func (act *action) String() string { - return fmt.Sprintf("%s@%s", act.a, act.pkg) + return fmt.Sprintf("%s@%s", act.Analyzer, act.Package) } // NOTE(ldez) altered version of `func (act *action) execOnce()`. @@ -72,20 +76,26 @@ func (act *action) analyze() { return } - defer func(now time.Time) { - analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) - }(time.Now()) + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + t0 := time.Now() + defer func() { + act.Duration = time.Since(t0) + analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.runner.prefix, act.Analyzer.Name, act.Package.Name, time.Since(t0)) + }() // Report an error if any dependency failures. 
var depErrors error - for _, dep := range act.deps { - if dep.err != nil { - depErrors = errors.Join(depErrors, errors.Unwrap(dep.err)) + for _, dep := range act.Deps { + if dep.Err != nil { + depErrors = errors.Join(depErrors, errors.Unwrap(dep.Err)) } } - if depErrors != nil { - act.err = fmt.Errorf("failed prerequisites: %w", depErrors) + act.Err = fmt.Errorf("failed prerequisites: %w", depErrors) return } @@ -94,15 +104,14 @@ func (act *action) analyze() { inputs := make(map[*analysis.Analyzer]any) act.objectFacts = make(map[objectFactKey]analysis.Fact) act.packageFacts = make(map[packageFactKey]analysis.Fact) - startedAt := time.Now() - - for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { // Same package, different analysis (horizontal edge): // in-memory outputs of prerequisite analyzers // become inputs to this analysis pass. - inputs[dep.a] = dep.result - } else if dep.a == act.a { // (always true) + inputs[dep.Analyzer] = dep.Result + + } else if dep.Analyzer == act.Analyzer { // (always true) // Same analysis, different package (vertical edge): // serialized facts produced by prerequisite analysis // become available to this analysis pass. @@ -110,10 +119,20 @@ func (act *action) analyze() { } } - factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt)) + // NOTE(ldez) this is not compatible with our implementation. + // Quick (nonexhaustive) check that the correct go/packages mode bits were used. + // (If there were errors, all bets are off.) + // if pkg := act.Package; pkg.Errors == nil { + // if pkg.Name == "" || pkg.PkgPath == "" || pkg.Types == nil || pkg.Fset == nil || pkg.TypesSizes == nil { + // panic(fmt.Sprintf("packages must be loaded with packages.LoadSyntax mode: Name: %v, PkgPath: %v, Types: %v, Fset: %v, TypesSizes: %v", + // pkg.Name == "", pkg.PkgPath == "", pkg.Types == nil, pkg.Fset == nil, pkg.TypesSizes == nil)) + // } + // } + + factsDebugf("%s: Inherited facts in %s", act, time.Since(t0)) module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. - if mod := act.pkg.Module; mod != nil { + if mod := act.Package.Module; mod != nil { module.Path = mod.Path module.Version = mod.Version module.GoVersion = mod.GoVersion @@ -121,79 +140,104 @@ func (act *action) analyze() { // Run the analysis. 
pass := &analysis.Pass{ - Analyzer: act.a, - Fset: act.pkg.Fset, - Files: act.pkg.Syntax, - OtherFiles: act.pkg.OtherFiles, - IgnoredFiles: act.pkg.IgnoredFiles, - Pkg: act.pkg.Types, - TypesInfo: act.pkg.TypesInfo, - TypesSizes: act.pkg.TypesSizes, - TypeErrors: act.pkg.TypeErrors, + Analyzer: act.Analyzer, + Fset: act.Package.Fset, + Files: act.Package.Syntax, + OtherFiles: act.Package.OtherFiles, + IgnoredFiles: act.Package.IgnoredFiles, + Pkg: act.Package.Types, + TypesInfo: act.Package.TypesInfo, + TypesSizes: act.Package.TypesSizes, + TypeErrors: act.Package.TypeErrors, Module: module, ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, - ImportObjectFact: act.importObjectFact, + Report: func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) }, + ImportObjectFact: act.ObjectFact, ExportObjectFact: act.exportObjectFact, - ImportPackageFact: act.importPackageFact, + ImportPackageFact: act.PackageFact, ExportPackageFact: act.exportPackageFact, - AllObjectFacts: act.allObjectFacts, - AllPackageFacts: act.allPackageFacts, + AllObjectFacts: act.AllObjectFacts, + AllPackageFacts: act.AllPackageFacts, } - + pass.ReadFile = analysisinternal.MakeReadFile(pass) act.pass = pass - act.r.passToPkgGuard.Lock() - act.r.passToPkg[pass] = act.pkg - act.r.passToPkgGuard.Unlock() - if act.pkg.IllTyped { + act.runner.passToPkgGuard.Lock() + act.runner.passToPkg[pass] = act.Package + act.runner.passToPkgGuard.Unlock() + + act.Result, act.Err = func() (any, error) { + // NOTE(golangci-lint): // It looks like there should be !pass.Analyzer.RunDespiteErrors - // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, + // but govet's cgocall crashes on it. + // Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, // but it exits before it if packages.Load have failed. - act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg}) - } else { - startedAt = time.Now() + if act.Package.IllTyped { + return nil, fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.Package}) + } + + t1 := time.Now() - act.result, act.err = pass.Analyzer.Run(pass) + result, err := pass.Analyzer.Run(pass) + if err != nil { + return nil, err + } - analyzedIn := time.Since(startedAt) - if analyzedIn > time.Millisecond*10 { + analyzedIn := time.Since(t1) + if analyzedIn > 10*time.Millisecond { debugf("%s: run analyzer in %s", act, analyzedIn) } - } - // disallow calls after Run + // correct result type? + if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want { + return nil, fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + + // resolve diagnostic URLs + for i := range act.Diagnostics { + url, err := analysisflags.ResolveURL(act.Analyzer, act.Diagnostics[i]) + if err != nil { + return nil, err + } + act.Diagnostics[i].URL = url + } + return result, nil + }() + + // Help detect (disallowed) calls after Run. pass.ExportObjectFact = nil pass.ExportPackageFact = nil err := act.persistFactsToCache() if err != nil { - act.r.log.Warnf("Failed to persist facts to cache: %s", err) + act.runner.log.Warnf("Failed to persist facts to cache: %s", err) } } -// NOTE(ldez) altered: logger; serialize. +// NOTE(ldez) altered: logger; sanityCheck. // inheritFacts populates act.facts with // those it obtains from its dependency, dep. 
func inheritFacts(act, dep *action) { - const serialize = false + const sanityCheck = false for key, fact := range dep.objectFacts { // Filter out facts related to objects // that are irrelevant downstream // (equivalently: not in the compiler export data). - if !exportedFrom(key.obj, dep.pkg.Types) { + if !exportedFrom(key.obj, dep.Package.Types) { factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) continue } // Optionally serialize/deserialize fact // to verify that it works across address spaces. - if serialize { + if sanityCheck { encodedFact, err := codeFact(fact) if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) + act.runner.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) } fact = encodedFact } @@ -207,14 +251,26 @@ func inheritFacts(act, dep *action) { // TODO: filter out facts that belong to // packages not mentioned in the export data // to prevent side channels. + // + // The Pass.All{Object,Package}Facts accessors expose too much: + // all facts, of all types, for all dependencies in the action + // graph. Not only does the representation grow quadratically, + // but it violates the separate compilation paradigm, allowing + // analysis implementations to communicate with indirect + // dependencies that are not mentioned in the export data. + // + // It's not clear how to fix this short of a rather expensive + // filtering step after each action that enumerates all the + // objects that would appear in export data, and deletes + // facts associated with objects not in this set. // Optionally serialize/deserialize fact // to verify that it works across address spaces // and is deterministic. - if serialize { + if sanityCheck { encodedFact, err := codeFact(fact) if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.runner.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } fact = encodedFact } @@ -225,7 +281,7 @@ func inheritFacts(act, dep *action) { } } -// NOTE(ldez) no alteration. +// NOTE(ldez) altered: `new` is renamed to `newFact`. // codeFact encodes then decodes a fact, // just to exercise that logic. func codeFact(fact analysis.Fact) (analysis.Fact, error) { @@ -259,7 +315,7 @@ func codeFact(fact analysis.Fact) (analysis.Fact, error) { // This includes not just the exported members of pkg, but also unexported // constants, types, fields, and methods, perhaps belonging to other packages, // that find there way into the API. -// This is an over-approximation of the more accurate approach used by +// This is an overapproximation of the more accurate approach used by // gc export data, which walks the type graph, but it's much simpler. // // TODO(adonovan): do more accurate filtering by walking the type graph. @@ -282,11 +338,14 @@ func exportedFrom(obj types.Object, pkg *types.Package) bool { return false // Nil, Builtin, Label, or PkgName } -// NOTE(ldez) altered: logger; `act.factType` -// importObjectFact implements Pass.ImportObjectFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// importObjectFact copies the fact value to *ptr. -func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { +// NOTE(ldez) altered: logger; `act.factType`. +// ObjectFact retrieves a fact associated with obj, +// and returns true if one was found. 
+// Given a value ptr of type *T, where *T satisfies Fact, +// ObjectFact copies the value to *ptr. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. +func (act *action) ObjectFact(obj types.Object, ptr analysis.Fact) bool { if obj == nil { panic("nil object") } @@ -298,12 +357,16 @@ func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { return false } -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. +// NOTE(ldez) altered: logger; `act.factType`. // exportObjectFact implements Pass.ExportObjectFact. func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { - if obj.Pkg() != act.pkg.Types { - act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", - act.a, act.pkg, obj, fact) + if act.pass.ExportObjectFact == nil { + act.runner.log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) + } + + if obj.Pkg() != act.Package.Types { + act.runner.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.Analyzer, act.Package, obj, fact) } key := objectFactKey{obj, act.factType(fact)} @@ -312,12 +375,16 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { objstr := types.ObjectString(obj, (*types.Package).Name) factsExportDebugf("%s: object %s has fact %s\n", - act.pkg.Fset.Position(obj.Pos()), objstr, fact) + act.Package.Fset.Position(obj.Pos()), objstr, fact) } } // NOTE(ldez) no alteration. -func (act *action) allObjectFacts() []analysis.ObjectFact { +// AllObjectFacts returns a new slice containing all object facts of +// the analysis's FactTypes in unspecified order. +// +// See documentation at AllObjectFacts field of [analysis.Pass]. +func (act *action) AllObjectFacts() []analysis.ObjectFact { facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) for k := range act.objectFacts { facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) @@ -325,11 +392,12 @@ func (act *action) allObjectFacts() []analysis.ObjectFact { return facts } -// NOTE(ldez) altered: `act.factType` -// importPackageFact implements Pass.ImportPackageFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// fact copies the fact value to *ptr. -func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { +// NOTE(ldez) altered: `act.factType`. +// PackageFact retrieves a fact associated with package pkg, +// which must be this package or one of its dependencies. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. +func (act *action) PackageFact(pkg *types.Package, ptr analysis.Fact) bool { if pkg == nil { panic("nil package") } @@ -341,30 +409,38 @@ func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool return false } -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. +// NOTE(ldez) altered: logger; `act.factType`. // exportPackageFact implements Pass.ExportPackageFact. 
func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + act.runner.log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + key := packageFactKey{act.pass.Pkg, act.factType(fact)} act.packageFacts[key] = fact // clobber any existing entry factsDebugf("%s: package %s has fact %s\n", - act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + act.Package.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) } // NOTE(ldez) altered: add receiver to handle logs. func (act *action) factType(fact analysis.Fact) reflect.Type { t := reflect.TypeOf(fact) if t.Kind() != reflect.Ptr { - act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact) + act.runner.log.Fatalf("invalid Fact type: got %T, want pointer", fact) } return t } // NOTE(ldez) no alteration. -func (act *action) allPackageFacts() []analysis.PackageFact { +// AllPackageFacts returns a new slice containing all package +// facts of the analysis's FactTypes in unspecified order. +// +// See documentation at AllPackageFacts field of [analysis.Pass]. +func (act *action) AllPackageFacts() []analysis.PackageFact { facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) - for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) + for k, fact := range act.packageFacts { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: fact}) } return facts } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go index 44d676958..fca4b8c3a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go @@ -67,7 +67,7 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { // Unblock depending on actions and propagate error. 
for _, act := range lp.actions { close(act.analysisDoneCh) - act.err = werr + act.Err = werr } return } @@ -125,13 +125,14 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { pkg.IllTyped = true pkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Instances: make(map[*ast.Ident]types.Instance), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + Types: make(map[ast.Expr]types.TypeAndValue), + Instances: make(map[*ast.Ident]types.Instance), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + Scopes: make(map[ast.Node]*types.Scope), + FileVersions: make(map[*ast.File]string), } importer := func(path string) (*types.Package, error) { @@ -363,12 +364,12 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { pass.ImportPackageFact = nil pass.ExportPackageFact = nil act.pass = nil - act.deps = nil - if act.result != nil { + act.Deps = nil + if act.Result != nil { if isMemoryDebug { - debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result)) + debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.Result)) } - act.result = nil + act.Result = nil } } @@ -399,7 +400,7 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { for _, act := range lp.actions { if !lp.isInitial { - act.pkg = nil + act.Package = nil } act.packageFacts = nil act.objectFacts = nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go index a9aee03a2..3a9a35dec 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go @@ -2,6 +2,8 @@ package goanalysis import ( "fmt" + "go/token" + "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -81,6 +83,7 @@ func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Iss func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue { var issues []result.Issue + for i := range diags { diag := &diags[i] linterName := linterNameBuilder(diag) @@ -92,11 +95,43 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) } + var suggestedFixes []analysis.SuggestedFix + + for _, sf := range diag.SuggestedFixes { + // Skip suggested fixes on cgo files. + // The related error is: "diff has out-of-bounds edits" + // This is a temporary workaround. + if !strings.HasSuffix(diag.File.Name(), ".go") { + continue + } + + nsf := analysis.SuggestedFix{Message: sf.Message} + + for _, edit := range sf.TextEdits { + end := edit.End + + if !end.IsValid() { + end = edit.Pos + } + + // To be applied the positions need to be "adjusted" based on the file. + // This is the difference between the "displayed" positions and "effective" positions. 
+ nsf.TextEdits = append(nsf.TextEdits, analysis.TextEdit{ + Pos: token.Pos(diag.File.Offset(edit.Pos)), + End: token.Pos(diag.File.Offset(end)), + NewText: edit.NewText, + }) + } + + suggestedFixes = append(suggestedFixes, nsf) + } + issues = append(issues, result.Issue{ - FromLinter: linterName, - Text: text, - Pos: diag.Position, - Pkg: diag.Pkg, + FromLinter: linterName, + Text: text, + Pos: diag.Position, + Pkg: diag.Pkg, + SuggestedFixes: suggestedFixes, }) if len(diag.Related) > 0 { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go index 8c244688b..4366155b0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go @@ -48,7 +48,7 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages. Severity: i.Severity, Pos: i.Pos, LineRange: i.LineRange, - Replacement: i.Replacement, + SuggestedFixes: i.SuggestedFixes, ExpectNoLint: i.ExpectNoLint, ExpectedNoLintLinter: i.ExpectedNoLintLinter, }) @@ -123,7 +123,7 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, Severity: issue.Severity, Pos: issue.Pos, LineRange: issue.LineRange, - Replacement: issue.Replacement, + SuggestedFixes: issue.SuggestedFixes, Pkg: pkg, ExpectNoLint: issue.ExpectNoLint, ExpectedNoLintLinter: issue.ExpectedNoLintLinter, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/analyzer.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/analyzer.go new file mode 100644 index 000000000..c0ea66e7e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/analyzer.go @@ -0,0 +1,55 @@ +package goformatters + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "github.com/rogpeppe/go-internal/diff" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/goformatters/internal" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +// NewAnalyzer converts a [Formatter] to an [analysis.Analyzer]. 
+func NewAnalyzer(logger logutils.Log, doc string, formatter Formatter) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: formatter.Name(), + Doc: doc, + Run: func(pass *analysis.Pass) (any, error) { + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } + + input, err := os.ReadFile(position.Filename) + if err != nil { + return nil, fmt.Errorf("unable to open file %s: %w", position.Filename, err) + } + + output, err := formatter.Format(position.Filename, input) + if err != nil { + return nil, fmt.Errorf("error while running %s: %w", formatter.Name(), err) + } + + if !bytes.Equal(input, output) { + newName := filepath.ToSlash(position.Filename) + oldName := newName + ".orig" + + patch := diff.Diff(oldName, input, newName, output) + + err = internal.ExtractDiagnosticFromPatch(pass, file, patch, logger) + if err != nil { + return nil, fmt.Errorf("can't extract issues from %s diff output %q: %w", formatter.Name(), patch, err) + } + } + } + + return nil, nil + }, + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go new file mode 100644 index 000000000..c8953ad3b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go @@ -0,0 +1,6 @@ +package goformatters + +type Formatter interface { + Name() string + Format(filename string, src []byte) ([]byte, error) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go new file mode 100644 index 000000000..f28b5b98a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go @@ -0,0 +1,71 @@ +package gci + +import ( + "context" + "fmt" + + gcicfg "github.com/daixiang0/gci/pkg/config" + "github.com/daixiang0/gci/pkg/gci" + "github.com/daixiang0/gci/pkg/log" + "github.com/ldez/grignotin/gomod" + + "github.com/golangci/golangci-lint/pkg/config" + gcicfgi "github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/config" + "github.com/golangci/golangci-lint/pkg/goformatters/internal" +) + +const Name = "gci" + +type Formatter struct { + config *gcicfg.Config +} + +func New(settings *config.GciSettings) (*Formatter, error) { + log.InitLogger() + _ = log.L().Sync() + + modPath, err := gomod.GetModulePath(context.Background()) + if err != nil { + internal.FormatterLogger.Errorf("gci: %v", err) + } + + cfg := gcicfgi.YamlConfig{ + Cfg: gcicfg.BoolConfig{ + NoInlineComments: settings.NoInlineComments, + NoPrefixComments: settings.NoPrefixComments, + SkipGenerated: settings.SkipGenerated, + CustomOrder: settings.CustomOrder, + NoLexOrder: settings.NoLexOrder, + }, + SectionStrings: settings.Sections, + ModPath: modPath, + } + + if settings.LocalPrefixes != "" { + cfg.SectionStrings = []string{ + "standard", + "default", + fmt.Sprintf("prefix(%s)", settings.LocalPrefixes), + } + } + + parsedCfg, err := cfg.Parse() + if err != nil { + return nil, err + } + + return &Formatter{config: &gcicfg.Config{ + BoolConfig: parsedCfg.BoolConfig, + Sections: parsedCfg.Sections, + SectionSeparators: parsedCfg.SectionSeparators, + }}, nil +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(filename string, src []byte) ([]byte, error) { + _, formatted, err := gci.LoadFormat(src, filename, *f.config) + return formatted, err +} diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/LICENSE b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/LICENSE new file mode 100644 index 000000000..e1292f738 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, Xiang Dai +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/config/config.go new file mode 100644 index 000000000..8140d96bf --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/config/config.go @@ -0,0 +1,107 @@ +package config + +import ( + "sort" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/daixiang0/gci/pkg/config" + "github.com/daixiang0/gci/pkg/section" + sectioni "github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section" +) + +var defaultOrder = map[string]int{ + section.StandardType: 0, + section.DefaultType: 1, + section.CustomType: 2, + section.BlankType: 3, + section.DotType: 4, + section.AliasType: 5, + section.LocalModuleType: 6, +} + +type Config struct { + config.BoolConfig + Sections section.SectionList + SectionSeparators section.SectionList +} + +type YamlConfig struct { + Cfg config.BoolConfig `yaml:",inline"` + SectionStrings []string `yaml:"sections"` + SectionSeparatorStrings []string `yaml:"sectionseparators"` + + // Since history issue, Golangci-lint needs Analyzer to run and GCI add an Analyzer layer to integrate. + // The ModPath param is only from analyzer.go, no need to set it in all other places. 
+	ModPath string `yaml:"-"`
+}
+
+func (g YamlConfig) Parse() (*Config, error) {
+	var err error
+
+	sections, err := sectioni.Parse(g.SectionStrings)
+	if err != nil {
+		return nil, err
+	}
+	if sections == nil {
+		sections = sectioni.DefaultSections()
+	}
+	if err := configureSections(sections, g.ModPath); err != nil {
+		return nil, err
+	}
+
+	// if default order sorted sections
+	if !g.Cfg.CustomOrder {
+		sort.Slice(sections, func(i, j int) bool {
+			sectionI, sectionJ := sections[i].Type(), sections[j].Type()
+
+			if g.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 {
+				return defaultOrder[sectionI] < defaultOrder[sectionJ]
+			}
+
+			return strings.Compare(sections[i].String(), sections[j].String()) < 0
+		})
+	}
+
+	sectionSeparators, err := sectioni.Parse(g.SectionSeparatorStrings)
+	if err != nil {
+		return nil, err
+	}
+	if sectionSeparators == nil {
+		sectionSeparators = section.DefaultSectionSeparators()
+	}
+
+	return &Config{g.Cfg, sections, sectionSeparators}, nil
+}
+
+func ParseConfig(in string) (*Config, error) {
+	config := YamlConfig{}
+
+	err := yaml.Unmarshal([]byte(in), &config)
+	if err != nil {
+		return nil, err
+	}
+
+	gciCfg, err := config.Parse()
+	if err != nil {
+		return nil, err
+	}
+
+	return gciCfg, nil
+}
+
+// configureSections now only do golang module path finding.
+// Since history issue, Golangci-lint needs Analyzer to run and GCI add an Analyzer layer to integrate.
+// The path param is from analyzer.go, in all other places should pass empty string.
+func configureSections(sections section.SectionList, path string) error {
+	for _, sec := range sections {
+		switch s := sec.(type) {
+		case *section.LocalModule:
+			if err := s.Configure(path); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/parser.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/parser.go
new file mode 100644
index 000000000..9662cbd1a
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/parser.go
@@ -0,0 +1,51 @@
+package section
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/daixiang0/gci/pkg/section"
+)
+
+func Parse(data []string) (section.SectionList, error) {
+	if len(data) == 0 {
+		return nil, nil
+	}
+
+	var list section.SectionList
+	var errString string
+	for _, d := range data {
+		s := strings.ToLower(d)
+		if len(s) == 0 {
+			return nil, nil
+		}
+
+		if s == "default" {
+			list = append(list, section.Default{})
+		} else if s == "standard" {
+			list = append(list, Standard{})
+		} else if s == "newline" {
+			list = append(list, section.NewLine{})
+		} else if strings.HasPrefix(s, "prefix(") && len(d) > 8 {
+			list = append(list, section.Custom{Prefix: d[7 : len(d)-1]})
+		} else if strings.HasPrefix(s, "commentline(") && len(d) > 13 {
+			list = append(list, section.Custom{Prefix: d[12 : len(d)-1]})
+		} else if s == "dot" {
+			list = append(list, section.Dot{})
+		} else if s == "blank" {
+			list = append(list, section.Blank{})
+		} else if s == "alias" {
+			list = append(list, section.Alias{})
+		} else if s == "localmodule" {
+			// pointer because we need to mutate the section at configuration time
+			list = append(list, &section.LocalModule{})
+		} else {
+			errString += fmt.Sprintf(" %s", s)
+		}
+	}
+	if errString != "" {
+		return nil, errors.New(fmt.Sprintf("invalid params:%s", errString))
+	}
+	return list, nil
+}
diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/section.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/section.go new file mode 100644 index 000000000..e9c663222 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/section.go @@ -0,0 +1,7 @@ +package section + +import "github.com/daixiang0/gci/pkg/section" + +func DefaultSections() section.SectionList { + return section.SectionList{Standard{}, section.Default{}} +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard.go new file mode 100644 index 000000000..26c7e9dc7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard.go @@ -0,0 +1,30 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const StandardType = "standard" + +type Standard struct{} + +func (s Standard) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if isStandard(spec.Path) { + return specificity.StandardMatch{} + } + return specificity.MisMatch{} +} + +func (s Standard) String() string { + return StandardType +} + +func (s Standard) Type() string { + return StandardType +} + +func isStandard(pkg string) bool { + _, ok := standardPackages[pkg] + return ok +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard_list.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard_list.go new file mode 100644 index 000000000..2fddded70 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/internal/section/standard_list.go @@ -0,0 +1,182 @@ +package section + +// Code generated based on go1.24.0 X:boringcrypto,arenas,synctest. DO NOT EDIT. 
+ +var standardPackages = map[string]struct{}{ + "archive/tar": {}, + "archive/zip": {}, + "arena": {}, + "bufio": {}, + "bytes": {}, + "cmp": {}, + "compress/bzip2": {}, + "compress/flate": {}, + "compress/gzip": {}, + "compress/lzw": {}, + "compress/zlib": {}, + "container/heap": {}, + "container/list": {}, + "container/ring": {}, + "context": {}, + "crypto": {}, + "crypto/aes": {}, + "crypto/boring": {}, + "crypto/cipher": {}, + "crypto/des": {}, + "crypto/dsa": {}, + "crypto/ecdh": {}, + "crypto/ecdsa": {}, + "crypto/ed25519": {}, + "crypto/elliptic": {}, + "crypto/fips140": {}, + "crypto/hkdf": {}, + "crypto/hmac": {}, + "crypto/md5": {}, + "crypto/mlkem": {}, + "crypto/pbkdf2": {}, + "crypto/rand": {}, + "crypto/rc4": {}, + "crypto/rsa": {}, + "crypto/sha1": {}, + "crypto/sha256": {}, + "crypto/sha3": {}, + "crypto/sha512": {}, + "crypto/subtle": {}, + "crypto/tls": {}, + "crypto/tls/fipsonly": {}, + "crypto/x509": {}, + "crypto/x509/pkix": {}, + "database/sql": {}, + "database/sql/driver": {}, + "debug/buildinfo": {}, + "debug/dwarf": {}, + "debug/elf": {}, + "debug/gosym": {}, + "debug/macho": {}, + "debug/pe": {}, + "debug/plan9obj": {}, + "embed": {}, + "encoding": {}, + "encoding/ascii85": {}, + "encoding/asn1": {}, + "encoding/base32": {}, + "encoding/base64": {}, + "encoding/binary": {}, + "encoding/csv": {}, + "encoding/gob": {}, + "encoding/hex": {}, + "encoding/json": {}, + "encoding/pem": {}, + "encoding/xml": {}, + "errors": {}, + "expvar": {}, + "flag": {}, + "fmt": {}, + "go/ast": {}, + "go/build": {}, + "go/build/constraint": {}, + "go/constant": {}, + "go/doc": {}, + "go/doc/comment": {}, + "go/format": {}, + "go/importer": {}, + "go/parser": {}, + "go/printer": {}, + "go/scanner": {}, + "go/token": {}, + "go/types": {}, + "go/version": {}, + "hash": {}, + "hash/adler32": {}, + "hash/crc32": {}, + "hash/crc64": {}, + "hash/fnv": {}, + "hash/maphash": {}, + "html": {}, + "html/template": {}, + "image": {}, + "image/color": {}, + "image/color/palette": {}, + "image/draw": {}, + "image/gif": {}, + "image/jpeg": {}, + "image/png": {}, + "index/suffixarray": {}, + "io": {}, + "io/fs": {}, + "io/ioutil": {}, + "iter": {}, + "log": {}, + "log/slog": {}, + "log/syslog": {}, + "maps": {}, + "math": {}, + "math/big": {}, + "math/bits": {}, + "math/cmplx": {}, + "math/rand": {}, + "math/rand/v2": {}, + "mime": {}, + "mime/multipart": {}, + "mime/quotedprintable": {}, + "net": {}, + "net/http": {}, + "net/http/cgi": {}, + "net/http/cookiejar": {}, + "net/http/fcgi": {}, + "net/http/httptest": {}, + "net/http/httptrace": {}, + "net/http/httputil": {}, + "net/http/pprof": {}, + "net/mail": {}, + "net/netip": {}, + "net/rpc": {}, + "net/rpc/jsonrpc": {}, + "net/smtp": {}, + "net/textproto": {}, + "net/url": {}, + "os": {}, + "os/exec": {}, + "os/signal": {}, + "os/user": {}, + "path": {}, + "path/filepath": {}, + "plugin": {}, + "reflect": {}, + "regexp": {}, + "regexp/syntax": {}, + "runtime": {}, + "runtime/cgo": {}, + "runtime/coverage": {}, + "runtime/debug": {}, + "runtime/metrics": {}, + "runtime/pprof": {}, + "runtime/race": {}, + "runtime/trace": {}, + "slices": {}, + "sort": {}, + "strconv": {}, + "strings": {}, + "structs": {}, + "sync": {}, + "sync/atomic": {}, + "syscall": {}, + "testing": {}, + "testing/fstest": {}, + "testing/iotest": {}, + "testing/quick": {}, + "testing/slogtest": {}, + "testing/synctest": {}, + "text/scanner": {}, + "text/tabwriter": {}, + "text/template": {}, + "text/template/parse": {}, + "time": {}, + "time/tzdata": {}, + "unicode": {}, + 
"unicode/utf16": {}, + "unicode/utf8": {}, + "unique": {}, + "unsafe": {}, + "weak": {}, +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go new file mode 100644 index 000000000..9005c751d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go @@ -0,0 +1,35 @@ +package gofmt + +import ( + "github.com/golangci/gofmt/gofmt" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "gofmt" + +type Formatter struct { + options gofmt.Options +} + +func New(settings *config.GoFmtSettings) *Formatter { + options := gofmt.Options{} + + if settings != nil { + options.NeedSimplify = settings.Simplify + + for _, rule := range settings.RewriteRules { + options.RewriteRules = append(options.RewriteRules, gofmt.RewriteRule(rule)) + } + } + + return &Formatter{options: options} +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(filename string, src []byte) ([]byte, error) { + return gofmt.Source(filename, src, f.options) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go new file mode 100644 index 000000000..7c548a2af --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go @@ -0,0 +1,46 @@ +package gofumpt + +import ( + "strings" + + gofumpt "mvdan.cc/gofumpt/format" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "gofumpt" + +type Formatter struct { + options gofumpt.Options +} + +func New(settings *config.GofumptSettings, goVersion string) *Formatter { + var options gofumpt.Options + + if settings != nil { + options = gofumpt.Options{ + LangVersion: getLangVersion(goVersion), + ModulePath: settings.ModulePath, + ExtraRules: settings.ExtraRules, + } + } + + return &Formatter{options: options} +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(_ string, src []byte) ([]byte, error) { + return gofumpt.Source(src, f.options) +} + +func getLangVersion(v string) string { + if v == "" { + // TODO: defaults to "1.15", in the future (v2) must be removed. + return "go1.15" + } + + return "go" + strings.TrimPrefix(v, "go") +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go new file mode 100644 index 000000000..fa0f1fc4f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go @@ -0,0 +1,28 @@ +package goimports + +import ( + "golang.org/x/tools/imports" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "goimports" + +type Formatter struct{} + +func New(settings *config.GoImportsSettings) *Formatter { + if settings != nil { + imports.LocalPrefix = settings.LocalPrefixes + } + + return &Formatter{} +} + +func (*Formatter) Name() string { + return Name +} + +func (*Formatter) Format(filename string, src []byte) ([]byte, error) { + // The `imports.LocalPrefix` (`settings.LocalPrefixes`) is a global var. 
+ return imports.Process(filename, src, nil) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/commons.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/commons.go new file mode 100644 index 000000000..5320e786b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/commons.go @@ -0,0 +1,6 @@ +package internal + +import "github.com/golangci/golangci-lint/pkg/logutils" + +// FormatterLogger must be used only when the context logger is not available. +var FormatterLogger = logutils.NewStderrLog(logutils.DebugKeyFormatter) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/diff.go new file mode 100644 index 000000000..75d65b73a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/internal/diff.go @@ -0,0 +1,274 @@ +package internal + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "slices" + "strings" + + diffpkg "github.com/sourcegraph/go-diff/diff" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type Change struct { + From, To int + NewLines []string +} + +type diffLineType string + +const ( + diffLineAdded diffLineType = "added" + diffLineOriginal diffLineType = "original" + diffLineDeleted diffLineType = "deleted" +) + +type diffLine struct { + originalNumber int // 1-based original line number + typ diffLineType + data string // "+" or "-" stripped line +} + +type hunkChangesParser struct { + // needed because we merge currently added lines with the last original line + lastOriginalLine *diffLine + + // if the first line of diff is an adding we save all additions to replacementLinesToPrepend + replacementLinesToPrepend []string + + log logutils.Log + + changes []Change +} + +func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { + lines := parseDiffLines(h) + + for i := 0; i < len(lines); { + line := lines[i] + + if line.typ == diffLineOriginal { + p.handleOriginalLine(lines, line, &i) + continue + } + + var deletedLines []diffLine + for ; i < len(lines) && lines[i].typ == diffLineDeleted; i++ { + deletedLines = append(deletedLines, lines[i]) + } + + var addedLines []string + for ; i < len(lines) && lines[i].typ == diffLineAdded; i++ { + addedLines = append(addedLines, lines[i].data) + } + + if len(deletedLines) != 0 { + p.handleDeletedLines(deletedLines, addedLines) + continue + } + + // no deletions, only additions + p.handleAddedOnlyLines(addedLines) + } + + if len(p.replacementLinesToPrepend) != 0 { + p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", lines) + return nil + } + + return p.changes +} + +func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, i *int) { + if len(p.replacementLinesToPrepend) == 0 { + p.lastOriginalLine = &line + *i++ + return + } + + // check following added lines for the case: + // + added line 1 + // original line + // + added line 2 + + *i++ + var followingAddedLines []string + for ; *i < len(lines) && lines[*i].typ == diffLineAdded; *i++ { + followingAddedLines = append(followingAddedLines, lines[*i].data) + } + + change := Change{ + From: line.originalNumber, + To: line.originalNumber, + NewLines: slices.Concat(p.replacementLinesToPrepend, []string{line.data}, followingAddedLines), + } + p.changes = append(p.changes, change) + + p.replacementLinesToPrepend = nil + 
p.lastOriginalLine = &line +} + +func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { + change := Change{ + From: deletedLines[0].originalNumber, + To: deletedLines[len(deletedLines)-1].originalNumber, + } + + switch { + case len(addedLines) != 0: + change.NewLines = slices.Concat(p.replacementLinesToPrepend, addedLines) + p.replacementLinesToPrepend = nil + + case len(p.replacementLinesToPrepend) != 0: + // delete-only change with possible prepending + change.NewLines = slices.Clone(p.replacementLinesToPrepend) + p.replacementLinesToPrepend = nil + } + + p.changes = append(p.changes, change) +} + +func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { + if p.lastOriginalLine == nil { + // the first line is added; the diff looks like: + // 1. + ... + // 2. - ... + // or + // 1. + ... + // 2. ... + + p.replacementLinesToPrepend = addedLines + + return + } + + // add-only change merged into the last original line with possible prepending + change := Change{ + From: p.lastOriginalLine.originalNumber, + To: p.lastOriginalLine.originalNumber, + NewLines: slices.Concat(p.replacementLinesToPrepend, []string{p.lastOriginalLine.data}, addedLines), + } + + p.changes = append(p.changes, change) + + p.replacementLinesToPrepend = nil +} + +func parseDiffLines(h *diffpkg.Hunk) []diffLine { + lines := bytes.Split(h.Body, []byte{'\n'}) + + currentOriginalLineNumber := int(h.OrigStartLine) + + var diffLines []diffLine + + for i, line := range lines { + dl := diffLine{ + originalNumber: currentOriginalLineNumber, + } + + if i == len(lines)-1 && len(line) == 0 { + // handle last \n: don't add an empty original line + break + } + + lineStr := string(line) + + switch { + case strings.HasPrefix(lineStr, "-"): + dl.typ = diffLineDeleted + dl.data = strings.TrimPrefix(lineStr, "-") + currentOriginalLineNumber++ + + case strings.HasPrefix(lineStr, "+"): + dl.typ = diffLineAdded + dl.data = strings.TrimPrefix(lineStr, "+") + + default: + dl.typ = diffLineOriginal + dl.data = strings.TrimPrefix(lineStr, " ") + currentOriginalLineNumber++ + } + + diffLines = append(diffLines, dl) + } + + // if > 0, then the original file had a 'No newline at end of file' mark + if h.OrigNoNewlineAt > 0 { + dl := diffLine{ + originalNumber: currentOriginalLineNumber + 1, + typ: diffLineAdded, + data: "", + } + diffLines = append(diffLines, dl) + } + + return diffLines +} + +func ExtractDiagnosticFromPatch( + pass *analysis.Pass, + file *ast.File, + patch []byte, + logger logutils.Log, +) error { + diffs, err := diffpkg.ParseMultiFileDiff(patch) + if err != nil { + return fmt.Errorf("can't parse patch: %w", err) + } + + if len(diffs) == 0 { + return fmt.Errorf("got no diffs from patch parser: %s", patch) + } + + ft := pass.Fset.File(file.Pos()) + + adjLine := pass.Fset.PositionFor(file.Pos(), false).Line - pass.Fset.PositionFor(file.Pos(), true).Line + + for _, d := range diffs { + if len(d.Hunks) == 0 { + logger.Warnf("Got no hunks in diff %+v", d) + continue + } + + for _, hunk := range d.Hunks { + p := hunkChangesParser{log: logger} + + changes := p.parse(hunk) + + for _, change := range changes { + pass.Report(toDiagnostic(ft, change, adjLine)) + } + } + } + + return nil +} + +func toDiagnostic(ft *token.File, change Change, adjLine int) analysis.Diagnostic { + from := change.From + adjLine + if from > ft.LineCount() { + from = ft.LineCount() + } + + start := ft.LineStart(from) + + end := goanalysis.EndOfLinePos(ft, change.To+adjLine) + + return analysis.Diagnostic{ + Pos: 
start, + End: end, + Message: "File is not properly formatted", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(strings.Join(change.NewLines, "\n")), + }}, + }}, + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go new file mode 100644 index 000000000..d66878c7a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go @@ -0,0 +1,74 @@ +package goformatters + +import ( + "bytes" + "fmt" + "go/format" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goformatters/gci" + "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" + "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" + "github.com/golangci/golangci-lint/pkg/goformatters/goimports" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type MetaFormatter struct { + log logutils.Log + formatters []Formatter +} + +func NewMetaFormatter(log logutils.Log, cfg *config.Config, enabledLinters map[string]*linter.Config) (*MetaFormatter, error) { + m := &MetaFormatter{log: log} + + if _, ok := enabledLinters[gofmt.Name]; ok { + m.formatters = append(m.formatters, gofmt.New(&cfg.LintersSettings.Gofmt)) + } + + if _, ok := enabledLinters[gofumpt.Name]; ok { + m.formatters = append(m.formatters, gofumpt.New(&cfg.LintersSettings.Gofumpt, cfg.Run.Go)) + } + + if _, ok := enabledLinters[goimports.Name]; ok { + m.formatters = append(m.formatters, goimports.New(&cfg.LintersSettings.Goimports)) + } + + // gci is a last because the only goal of gci is to handle imports. + if _, ok := enabledLinters[gci.Name]; ok { + formatter, err := gci.New(&cfg.LintersSettings.Gci) + if err != nil { + return nil, fmt.Errorf("gci: creating formatter: %w", err) + } + + m.formatters = append(m.formatters, formatter) + } + + return m, nil +} + +func (m *MetaFormatter) Format(filename string, src []byte) []byte { + if len(m.formatters) == 0 { + data, err := format.Source(src) + if err != nil { + m.log.Warnf("(fmt) formatting file %s: %v", filename, err) + return src + } + + return data + } + + data := bytes.Clone(src) + + for _, formatter := range m.formatters { + formatted, err := formatter.Format(filename, data) + if err != nil { + m.log.Warnf("(%s) formatting file %s: %v", formatter.Name(), filename, err) + continue + } + + data = formatted + } + + return data +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go index 653a2d514..ccc58fee4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go @@ -9,12 +9,12 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) -func New(setting *config.AsasalintSettings) *goanalysis.Linter { +func New(settings *config.AsasalintSettings) *goanalysis.Linter { cfg := asasalint.LinterSetting{} - if setting != nil { - cfg.Exclude = setting.Exclude - cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions - cfg.IgnoreTest = setting.IgnoreTest + if settings != nil { + cfg.Exclude = settings.Exclude + cfg.NoBuiltinExclusions = !settings.UseBuiltinExclusions + cfg.IgnoreTest = settings.IgnoreTest } a, err := asasalint.NewAnalyzer(cfg) diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go index 4ced901e8..c6315965c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go @@ -10,42 +10,42 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { +func New(settings *config.BiDiChkSettings) *goanalysis.Linter { a := bidichk.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if cfg != nil { + cfg := map[string]map[string]any{} + if settings != nil { var opts []string - if cfg.LeftToRightEmbedding { + if settings.LeftToRightEmbedding { opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING") } - if cfg.RightToLeftEmbedding { + if settings.RightToLeftEmbedding { opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING") } - if cfg.PopDirectionalFormatting { + if settings.PopDirectionalFormatting { opts = append(opts, "POP-DIRECTIONAL-FORMATTING") } - if cfg.LeftToRightOverride { + if settings.LeftToRightOverride { opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE") } - if cfg.RightToLeftOverride { + if settings.RightToLeftOverride { opts = append(opts, "RIGHT-TO-LEFT-OVERRIDE") } - if cfg.LeftToRightIsolate { + if settings.LeftToRightIsolate { opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") } - if cfg.RightToLeftIsolate { + if settings.RightToLeftIsolate { opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") } - if cfg.FirstStrongIsolate { + if settings.FirstStrongIsolate { opts = append(opts, "FIRST-STRONG-ISOLATE") } - if cfg.PopDirectionalIsolate { + if settings.PopDirectionalIsolate { opts = append(opts, "POP-DIRECTIONAL-ISOLATE") } - cfgMap[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "disallowed-runes": strings.Join(opts, ","), } } @@ -54,6 +54,6 @@ func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { a.Name, "Checks for dangerous unicode character sequences", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go index f39814edc..c520e88db 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go @@ -12,7 +12,7 @@ func New() *goanalysis.Linter { return goanalysis.NewLinter( a.Name, - "checks whether HTTP response body is closed successfully", + a.Doc, []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go index 13baba5a6..772b5601c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go @@ -33,5 +33,5 @@ func New(settings *config.Cyclop) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, cfg, - ).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard/depguard.go index d2aedf252..194405999 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard/depguard.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard/depguard.go @@ -1,22 +1,30 @@ package depguard import ( + "strings" + "github.com/OpenPeeDeeP/depguard/v2" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" ) -func New(settings *config.DepGuardSettings) *goanalysis.Linter { +func New(settings *config.DepGuardSettings, basePath string) *goanalysis.Linter { conf := depguard.LinterSettings{} if settings != nil { for s, rule := range settings.Rules { + var extendedPatterns []string + for _, file := range rule.Files { + extendedPatterns = append(extendedPatterns, strings.ReplaceAll(file, internal.PlaceholderBasePath, basePath)) + } + list := &depguard.List{ ListMode: rule.ListMode, - Files: rule.Files, + Files: extendedPatterns, Allow: rule.Allow, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go index 49108f4f1..afa8152fa 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go @@ -1,41 +1,26 @@ package dogsled import ( - "fmt" "go/ast" - "go/token" - "sync" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "dogsled" func New(settings *config.DogsledSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runDogsled(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil + return run(pass, settings.MaxBlankIdentifiers) }, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } return goanalysis.NewLinter( @@ -43,68 +28,51 @@ func New(settings *config.DogsledSettings) *goanalysis.Linter { "Checks assignments with too many blank identifiers (e.g. 
x, _, _, _, := f())", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue { - var reports []goanalysis.Issue - for _, f := range pass.Files { - v := &returnsVisitor{ - maxBlanks: settings.MaxBlankIdentifiers, - f: pass.Fset, - } - - ast.Walk(v, f) - - for i := range v.issues { - reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass)) - } - } - - return reports -} - -type returnsVisitor struct { - f *token.FileSet - maxBlanks int - issues []result.Issue -} - -func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { - funcDecl, ok := node.(*ast.FuncDecl) +func run(pass *analysis.Pass, maxBlanks int) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !ok { - return v + return nil, nil } - if funcDecl.Body == nil { - return v + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), } - for _, expr := range funcDecl.Body.List { - assgnStmt, ok := expr.(*ast.AssignStmt) + insp.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) if !ok { - continue + return + } + + if funcDecl.Body == nil { + return } - numBlank := 0 - for _, left := range assgnStmt.Lhs { - ident, ok := left.(*ast.Ident) + for _, expr := range funcDecl.Body.List { + assgnStmt, ok := expr.(*ast.AssignStmt) if !ok { continue } - if ident.Name == "_" { - numBlank++ + + numBlank := 0 + for _, left := range assgnStmt.Lhs { + ident, ok := left.(*ast.Ident) + if !ok { + continue + } + if ident.Name == "_" { + numBlank++ + } } - } - if numBlank > v.maxBlanks { - v.issues = append(v.issues, result.Issue{ - FromLinter: linterName, - Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), - Pos: v.f.Position(assgnStmt.Pos()), - }) + if numBlank > maxBlanks { + pass.Reportf(assgnStmt.Pos(), "declaration has %v blank identifiers", numBlank) + } } - } - return v + }) + + return nil, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go index 7abcb4c4f..6d1a9809c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go @@ -5,7 +5,7 @@ import ( "go/token" "sync" - duplAPI "github.com/golangci/dupl" + duplAPI "github.com/golangci/dupl/lib" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -45,7 +45,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, - "Tool for code clone detection", + "Detects duplicate fragments of code.", []*analysis.Analyzer{analyzer}, nil, ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { @@ -54,9 +54,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter { } func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - issues, err := duplAPI.Run(fileNames, settings.Threshold) + issues, err := duplAPI.Run(internal.GetGoFileNames(pass), settings.Threshold) if err != nil { return nil, err } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go index bba4fc9e1..a2bcc34d4 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go @@ -10,14 +10,14 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(setting *config.DupWordSettings) *goanalysis.Linter { +func New(settings *config.DupWordSettings) *goanalysis.Linter { a := dupword.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if setting != nil { - cfgMap[a.Name] = map[string]any{ - "keyword": strings.Join(setting.Keywords, ","), - "ignore": strings.Join(setting.Ignore, ","), + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "keyword": strings.Join(settings.Keywords, ","), + "ignore": strings.Join(settings.Ignore, ","), } } @@ -25,6 +25,6 @@ func New(setting *config.DupWordSettings) *goanalysis.Linter { a.Name, "checks for duplicate words in the source code", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go index 9a8a2aa87..67a1b2ca8 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go @@ -2,6 +2,7 @@ package errcheck import ( "bufio" + "cmp" "fmt" "os" "os/user" @@ -90,10 +91,7 @@ func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck text := "Error return value is not checked" if err.FuncName != "" { - code := err.SelectorName - if err.SelectorName == "" { - code = err.FuncName - } + code := cmp.Or(err.SelectorName, err.FuncName) text = fmt.Sprintf("Error return value of %s is not checked", internal.FormatCode(code, lintCtx.Cfg)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go index 8389a750c..506113d6d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go @@ -8,17 +8,17 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { +func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter { a := errchkjson.NewAnalyzer() - cfgMap := map[string]map[string]any{} - cfgMap[a.Name] = map[string]any{ + cfg := map[string]map[string]any{} + cfg[a.Name] = map[string]any{ "omit-safe": true, } - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "omit-safe": !cfg.CheckErrorFreeEncoding, - "report-no-exported": cfg.ReportNoExported, + if settings != nil { + cfg[a.Name] = map[string]any{ + "omit-safe": !settings.CheckErrorFreeEncoding, + "report-no-exported": settings.ReportNoExported, } } @@ -26,6 +26,6 @@ func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go index 86db8552d..14851adc2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go @@ -8,16 +8,16 @@ import ( 
"github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { +func New(settings *config.ErrorLintSettings) *goanalysis.Linter { var opts []errorlint.Option - if cfg != nil { - ae := toAllowPairs(cfg.AllowedErrors) + if settings != nil { + ae := toAllowPairs(settings.AllowedErrors) if len(ae) > 0 { opts = append(opts, errorlint.WithAllowedErrors(ae)) } - aew := toAllowPairs(cfg.AllowedErrorsWildcard) + aew := toAllowPairs(settings.AllowedErrorsWildcard) if len(aew) > 0 { opts = append(opts, errorlint.WithAllowedWildcard(aew)) } @@ -25,14 +25,14 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { a := errorlint.NewAnalyzer(opts...) - cfgMap := map[string]map[string]any{} + cfg := map[string]map[string]any{} - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "errorf": cfg.Errorf, - "errorf-multi": cfg.ErrorfMulti, - "asserts": cfg.Asserts, - "comparison": cfg.Comparison, + if settings != nil { + cfg[a.Name] = map[string]any{ + "errorf": settings.Errorf, + "errorf-multi": settings.ErrorfMulti, + "asserts": settings.Asserts, + "comparison": settings.Comparison, } } @@ -41,7 +41,7 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { "errorlint is a linter for that can be used to find code "+ "that will cause problems with the error wrapping scheme introduced in Go 1.13.", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref/exportloopref.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go similarity index 75% rename from vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref/exportloopref.go rename to vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go index e232f8045..2de8ea98c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref/exportloopref.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go @@ -1,14 +1,14 @@ -package exportloopref +package exptostd import ( - "github.com/kyoh86/exportloopref" + "github.com/ldez/exptostd" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/goanalysis" ) func New() *goanalysis.Linter { - a := exportloopref.Analyzer + a := exptostd.NewAnalyzer() return goanalysis.NewLinter( a.Name, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/fatcontext/fatcontext.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/fatcontext/fatcontext.go index 378025a8c..2ffacacd3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/fatcontext/fatcontext.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/fatcontext/fatcontext.go @@ -4,16 +4,25 @@ import ( "github.com/Crocmagnon/fatcontext/pkg/analyzer" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New() *goanalysis.Linter { - a := analyzer.Analyzer +func New(settings *config.FatcontextSettings) *goanalysis.Linter { + a := analyzer.NewAnalyzer() + + cfg := map[string]map[string]any{} + + if settings != nil { + cfg[a.Name] = map[string]any{ + analyzer.FlagCheckStructPointers: settings.CheckStructPointers, + } + } return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, - nil, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go index 3572b60c2..3b410359d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go @@ -2,40 +2,27 @@ package forbidigo import ( "fmt" - "sync" "github.com/ashanbrown/forbidigo/forbidigo" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "forbidigo" func New(settings *config.ForbidigoSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runForbidigo(pass, settings) + err := runForbidigo(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() return nil, nil }, } @@ -48,12 +35,10 @@ func New(settings *config.ForbidigoSettings) *goanalysis.Linter { "Forbids identifiers", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) { +func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error { options := []forbidigo.Option{ forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples), // disable "//permit" directives so only "//nolint" directives matters within golangci-lint @@ -66,38 +51,39 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]go for _, pattern := range settings.Forbid { buffer, err := pattern.MarshalString() if err != nil { - return nil, err + return err } + patterns = append(patterns, string(buffer)) } forbid, err := forbidigo.NewLinter(patterns, options...) 
if err != nil { - return nil, fmt.Errorf("failed to create linter %q: %w", linterName, err) + return fmt.Errorf("failed to create linter %q: %w", linterName, err) } - var issues []goanalysis.Issue for _, file := range pass.Files { runConfig := forbidigo.RunConfig{ Fset: pass.Fset, DebugLog: logutils.Debug(logutils.DebugKeyForbidigo), } - if settings != nil && settings.AnalyzeTypes { + + if settings.AnalyzeTypes { runConfig.TypesInfo = pass.TypesInfo } + hints, err := forbid.RunWithConfig(runConfig, file) if err != nil { - return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) + return fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) } for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos(), + Message: hint.Details(), + }) } } - return issues, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert/forcetypeassert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert/forcetypeassert.go index 741b57cea..98abad7e0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert/forcetypeassert.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert/forcetypeassert.go @@ -15,5 +15,5 @@ func New() *goanalysis.Linter { "finds forced type assertions", []*analysis.Analyzer{a}, nil, - ).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go index e43339394..bdadcece4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go @@ -1,75 +1,33 @@ package funlen import ( - "go/token" - "strings" - "sync" - "github.com/ultraware/funlen" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const linterName = "funlen" +type Config struct { + lineLimit int + stmtLimit int + ignoreComments bool +} func New(settings *config.FunlenSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - issues := runFunlen(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - }, + cfg := Config{} + if settings != nil { + cfg.lineLimit = settings.Lines + cfg.stmtLimit = settings.Statements + cfg.ignoreComments = !settings.IgnoreComments } + a := funlen.NewAnalyzer(cfg.lineLimit, cfg.stmtLimit, cfg.ignoreComments) + return goanalysis.NewLinter( - linterName, - "Tool for detection of long functions", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue { - var lintIssues []funlen.Message - for _, file := range pass.Files { - fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments) - lintIssues = append(lintIssues, fileIssues...) - } - - if len(lintIssues) == 0 { - return nil - } - - issues := make([]goanalysis.Issue, len(lintIssues)) - for k, i := range lintIssues { - issues[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: linterName, - }, pass) - } - - return issues + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go index a9afb6c89..b79f1a370 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go @@ -1,250 +1,33 @@ package gci import ( - "fmt" - "sort" - "strings" - "sync" - - gcicfg "github.com/daixiang0/gci/pkg/config" - "github.com/daixiang0/gci/pkg/gci" - "github.com/daixiang0/gci/pkg/io" - "github.com/daixiang0/gci/pkg/log" - "github.com/daixiang0/gci/pkg/section" - "github.com/golangci/modinfo" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/goformatters" + gcibase "github.com/golangci/golangci-lint/pkg/goformatters/gci" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" ) const linterName = "gci" func New(settings *config.GciSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, - Requires: []*analysis.Analyzer{ - modinfo.Analyzer, - }, - } - - var cfg *gcicfg.Config - if settings != nil { - rawCfg := gcicfg.YamlConfig{ - Cfg: gcicfg.BoolConfig{ - SkipGenerated: settings.SkipGenerated, - CustomOrder: settings.CustomOrder, - NoLexOrder: settings.NoLexOrder, - }, - SectionStrings: settings.Sections, - } - - if settings.LocalPrefixes != "" { - prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)} - rawCfg.SectionStrings = prefix - } - - var err error - cfg, err = YamlConfig{origin: rawCfg}.Parse() - if err != nil { - internal.LinterLogger.Fatalf("gci: configuration parsing: %v", err) - } + formatter, err := gcibase.New(settings) + if err != nil { + internal.LinterLogger.Fatalf("%s: create analyzer: %v", linterName, err) } - var lock sync.Mutex + a := goformatters.NewAnalyzer( + 
internal.LinterLogger.Child(linterName), + "Checks if code and import statements are formatted, with additional rules.", + formatter, + ) return goanalysis.NewLinter( - linterName, - "Gci controls Go package import order and makes it always deterministic.", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (any, error) { - var err error - cfg.Sections, err = hackSectionList(pass, cfg) - if err != nil { - return nil, err - } - - issues, err := runGci(pass, lintCtx, cfg, &lock) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var diffs []string - err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock) - if err != nil { - return nil, err - } - - var issues []goanalysis.Issue - - for _, diff := range diffs { - if diff == "" { - continue - } - - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGci) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGci(settings *config.LintersSettings) string { - text := "File is not `gci`-ed" - - hasOptions := settings.Gci.SkipGenerated || len(settings.Gci.Sections) > 0 - if !hasOptions { - return text - } - - text += " with" - - if settings.Gci.SkipGenerated { - text += " --skip-generated" - } - - if len(settings.Gci.Sections) > 0 { - for _, sect := range settings.Gci.Sections { - text += " -s " + sect - } - } - - if settings.Gci.CustomOrder { - text += " --custom-order" - } - - return text -} - -func hackSectionList(pass *analysis.Pass, cfg *gcicfg.Config) (section.SectionList, error) { - var sections section.SectionList - - for _, sect := range cfg.Sections { - // local module hack - if v, ok := sect.(*section.LocalModule); ok { - info, err := modinfo.FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - if info.Path == "" { - continue - } - - v.Path = info.Path - } - - sections = append(sections, sect) - } - - return sections, nil -} - -// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator. -// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI. 
-// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75 -// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80 -func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error { - log.InitLogger() - defer func() { _ = log.L().Sync() }() - - return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { - fileURI := span.URIFromPath(filePath) - edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) - unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) - lock.Lock() - *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) - lock.Unlock() - return nil - }) -} - -// Code below this comment is borrowed and modified from gci. -// https://github.com/daixiang0/gci/blob/v0.13.5/pkg/config/config.go - -var defaultOrder = map[string]int{ - section.StandardType: 0, - section.DefaultType: 1, - section.CustomType: 2, - section.BlankType: 3, - section.DotType: 4, - section.AliasType: 5, - section.LocalModuleType: 6, -} - -type YamlConfig struct { - origin gcicfg.YamlConfig -} - -//nolint:gocritic // code borrowed from gci and modified to fix LocalModule section behavior. -func (g YamlConfig) Parse() (*gcicfg.Config, error) { - var err error - - sections, err := section.Parse(g.origin.SectionStrings) - if err != nil { - return nil, err - } - - if sections == nil { - sections = section.DefaultSections() - } - - // if default order sorted sections - if !g.origin.Cfg.CustomOrder { - sort.Slice(sections, func(i, j int) bool { - sectionI, sectionJ := sections[i].Type(), sections[j].Type() - - if g.origin.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 { - return defaultOrder[sectionI] < defaultOrder[sectionJ] - } - - return strings.Compare(sections[i].String(), sections[j].String()) < 0 - }) - } - - sectionSeparators, err := section.Parse(g.origin.SectionSeparatorStrings) - if err != nil { - return nil, err - } - if sectionSeparators == nil { - sectionSeparators = section.DefaultSectionSeparators() - } - - return &gcicfg.Config{BoolConfig: g.origin.Cfg, Sections: sections, SectionSeparators: sectionSeparators}, nil + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go index 9873c9ba4..6826b77b6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go @@ -14,18 +14,18 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter { if settings != nil { cfg = &types.Config{ - SuppressLen: types.Boolean(settings.SuppressLenAssertion), - SuppressNil: types.Boolean(settings.SuppressNilAssertion), - SuppressErr: types.Boolean(settings.SuppressErrAssertion), - SuppressCompare: types.Boolean(settings.SuppressCompareAssertion), - SuppressAsync: types.Boolean(settings.SuppressAsyncAssertion), - ForbidFocus: types.Boolean(settings.ForbidFocusContainer), - SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning), - AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero), - ForceExpectTo: types.Boolean(settings.ForceExpectTo), - ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals), - ForbidSpecPollution: 
types.Boolean(settings.ForbidSpecPollution), - ForceSucceedForFuncs: types.Boolean(settings.ForceSucceedForFuncs), + SuppressLen: settings.SuppressLenAssertion, + SuppressNil: settings.SuppressNilAssertion, + SuppressErr: settings.SuppressErrAssertion, + SuppressCompare: settings.SuppressCompareAssertion, + SuppressAsync: settings.SuppressAsyncAssertion, + ForbidFocus: settings.ForbidFocusContainer, + SuppressTypeCompare: settings.SuppressTypeCompareWarning, + AllowHaveLen0: settings.AllowHaveLenZero, + ForceExpectTo: settings.ForceExpectTo, + ValidateAsyncIntervals: settings.ValidateAsyncIntervals, + ForbidSpecPollution: settings.ForbidSpecPollution, + ForceSucceedForFuncs: settings.ForceSucceedForFuncs, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals/gochecknoglobals.go index af22b2f8e..e893dfcbb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals/gochecknoglobals.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals/gochecknoglobals.go @@ -10,17 +10,10 @@ import ( func New() *goanalysis.Linter { a := checknoglobals.Analyzer() - // gochecknoglobals only lints test files if the `-t` flag is passed, - // so we pass the `t` flag as true to the analyzer before running it. - // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--exclude-files`. - linterConfig := map[string]map[string]any{ - a.Name: {"t": true}, - } - return goanalysis.NewLinter( a.Name, "Check that no global variables exist.", []*analysis.Analyzer{a}, - linterConfig, + nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go index 1345eb8c2..510a06c91 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go @@ -1,46 +1,24 @@ package gochecknoinits import ( - "fmt" "go/ast" - "go/token" - "sync" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "gochecknoinits" func New() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - var res []goanalysis.Issue - for _, file := range pass.Files { - fileIssues := checkFileForInits(file, pass.Fset) - for i := range fileIssues { - res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) - } - } - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil - }, + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } return goanalysis.NewLinter( @@ -48,28 +26,30 @@ func New() *goanalysis.Linter { "Checks that no init functions are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue { - var res []result.Issue - for _, decl := range f.Decls { +func run(pass *analysis.Pass) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + insp.Preorder(nodeFilter, func(decl ast.Node) { funcDecl, ok := decl.(*ast.FuncDecl) if !ok { - continue + return } fnName := funcDecl.Name.Name if fnName == "init" && funcDecl.Recv.NumFields() == 0 { - res = append(res, result.Issue{ - Pos: fset.Position(funcDecl.Pos()), - Text: fmt.Sprintf("don't use %s function", internal.FormatCode(fnName, nil)), - FromLinter: linterName, - }) + pass.Reportf(funcDecl.Pos(), "don't use %s function", internal.FormatCode(fnName, nil)) } - } + }) - return res + return nil, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go index 7aab0efeb..cbc587312 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go @@ -61,9 +61,13 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti TypesInfo: pass.TypesInfo, } + cfg := gochecksumtype.Config{ + DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive, + IncludeSharedInterfaces: settings.IncludeSharedInterfaces, + } + var unknownError error - errors := gochecksumtype.Run([]*packages.Package{pkg}, - gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive}) + errors := gochecksumtype.Run([]*packages.Package{pkg}, cfg) for _, err := range errors { err, ok := err.(gochecksumtype.Error) if !ok { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go index 194ea3535..0fa4c63d1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go @@ -5,25 +5,23 @@ import ( "fmt" "go/ast" "go/types" - "path/filepath" + "maps" "reflect" "runtime" "slices" - "sort" "strings" "sync" "github.com/go-critic/go-critic/checkers" gocriticlinter "github.com/go-critic/go-critic/linter" _ "github.com/quasilyte/go-ruleguard/dsl" - "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "gocritic" @@ -34,9 +32,6 @@ var ( ) func New(settings *config.GoCriticSettings) *goanalysis.Linter { - var mu sync.Mutex - var 
resIssues []goanalysis.Issue - wrapper := &goCriticWrapper{ sizes: types.SizesFor("gc", runtime.GOARCH), } @@ -45,19 +40,11 @@ func New(settings *config.GoCriticSettings) *goanalysis.Linter { Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := wrapper.run(pass) + err := wrapper.run(pass) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -71,19 +58,19 @@ Dynamic rules are written declaratively with AST patterns, filters, report messa nil, ). WithContextSetter(func(context *linter.Context) { - wrapper.configDir = context.Cfg.GetConfigDir() + wrapper.replacer = strings.NewReplacer( + internal.PlaceholderBasePath, context.Cfg.GetBasePath(), + internal.PlaceholderConfigDir, context.Cfg.GetConfigDir(), //nolint:staticcheck // It must be removed in v2. + ) wrapper.init(context.Log, settings) }). - WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }). WithLoadMode(goanalysis.LoadModeTypesInfo) } type goCriticWrapper struct { settingsWrapper *settingsWrapper - configDir string + replacer *strings.Replacer sizes types.Sizes once sync.Once } @@ -111,9 +98,9 @@ func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSet w.settingsWrapper = settingsWrapper } -func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { +func (w *goCriticWrapper) run(pass *analysis.Pass) error { if w.settingsWrapper == nil { - return nil, errors.New("the settings wrapper is nil") + return errors.New("the settings wrapper is nil") } linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes) @@ -122,19 +109,14 @@ func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { enabledCheckers, err := w.buildEnabledCheckers(linterCtx) if err != nil { - return nil, err + return err } linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - pkgIssues := runOnPackage(linterCtx, enabledCheckers, pass.Files) + runOnPackage(pass, enabledCheckers, pass.Files) - issues := make([]goanalysis.Issue, 0, len(pkgIssues)) - for i := range pkgIssues { - issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass)) - } - - return issues, nil + return nil } func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { @@ -154,6 +136,7 @@ func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context if err != nil { return nil, err } + enabledCheckers = append(enabledCheckers, c) } @@ -184,8 +167,7 @@ func (w *goCriticWrapper) configureCheckerInfo( info.Name, k) } - supportedKeys := maps.Keys(info.Params) - sort.Strings(supportedKeys) + supportedKeys := slices.Sorted(maps.Keys(info.Params)) return fmt.Errorf("checker %s config param %s doesn't exist, all existing: %s", info.Name, k, supportedKeys) @@ -208,53 +190,42 @@ func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any { return rv.Bool() case reflect.String: // Perform variable substitution. 
- return strings.ReplaceAll(rv.String(), "${configDir}", w.configDir) + return w.replacer.Replace(rv.String()) default: return p } } -func runOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue { - var res []result.Issue +func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files []*ast.File) { for _, f := range files { - filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) - linterCtx.SetFileInfo(filename, f) - - issues := runOnFile(linterCtx, f, checks) - res = append(res, issues...) + runOnFile(pass, f, checks) } - return res } -func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue { - var res []result.Issue - +func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checker) { for _, c := range checks { // All checkers are expected to use *lint.Context // as read-only structure, so no copying is required. for _, warn := range c.Check(f) { - pos := linterCtx.FileSet.Position(warn.Pos) - issue := result.Issue{ - Pos: pos, - Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), - FromLinter: linterName, + diag := analysis.Diagnostic{ + Pos: warn.Pos, + Category: c.Info.Name, + Message: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), } if warn.HasQuickFix() { - issue.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: pos.Column - 1, - Length: int(warn.Suggestion.To - warn.Suggestion.From), - NewString: string(warn.Suggestion.Replacement), - }, - } + diag.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: warn.Suggestion.From, + End: warn.Suggestion.To, + NewText: warn.Suggestion.Replacement, + }}, + }} } - res = append(res, issue) + pass.Report(diag) } } - - return res } type goCriticChecks[T any] map[string]T @@ -297,8 +268,7 @@ func newSettingsWrapper(settings *config.GoCriticSettings, logger logutils.Log) } } - allTagsSorted := maps.Keys(allChecksByTag) - sort.Strings(allTagsSorted) + allTagsSorted := slices.Sorted(maps.Keys(allChecksByTag)) return &settingsWrapper{ GoCriticSettings: settings, @@ -326,6 +296,7 @@ func (s *settingsWrapper) InferEnabledChecks() { s.debugChecksInitialState() enabledByDefaultChecks, disabledByDefaultChecks := s.buildEnabledAndDisabledByDefaultChecks() + debugChecksListf(enabledByDefaultChecks, "Enabled by default") debugChecksListf(disabledByDefaultChecks, "Disabled by default") @@ -346,7 +317,8 @@ func (s *settingsWrapper) InferEnabledChecks() { if len(s.EnabledTags) != 0 { enabledFromTags := s.expandTagsToChecks(s.EnabledTags) - debugChecksListf(enabledFromTags, "Enabled by config tags %s", sprintSortedStrings(s.EnabledTags)) + + debugChecksListf(enabledFromTags, "Enabled by config tags %s", s.EnabledTags) for _, check := range enabledFromTags { enabledChecks[check] = struct{}{} @@ -367,7 +339,8 @@ func (s *settingsWrapper) InferEnabledChecks() { if len(s.DisabledTags) != 0 { disabledFromTags := s.expandTagsToChecks(s.DisabledTags) - debugChecksListf(disabledFromTags, "Disabled by config tags %s", sprintSortedStrings(s.DisabledTags)) + + debugChecksListf(disabledFromTags, "Disabled by config tags %s", s.DisabledTags) for _, check := range disabledFromTags { delete(enabledChecks, check) @@ -388,6 +361,7 @@ func (s *settingsWrapper) InferEnabledChecks() { s.inferredEnabledChecks = enabledChecks s.inferredEnabledChecksLowerCased = normalizeMap(s.inferredEnabledChecks) + s.debugChecksFinalState() } @@ -581,10 +555,7 @@ func 
debugChecksListf(checks []string, format string, args ...any) { return } - debugf("%s checks (%d): %s", fmt.Sprintf(format, args...), len(checks), sprintSortedStrings(checks)) -} + v := slices.Sorted(slices.Values(checks)) -func sprintSortedStrings(v []string) string { - sort.Strings(slices.Clone(v)) - return fmt.Sprint(v) + debugf("%s checks (%d): %s", fmt.Sprintf(format, args...), len(checks), strings.Join(v, ", ")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go index fc51b5bb8..3194b3d3a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go @@ -1,23 +1,18 @@ package godot import ( - "sync" + "cmp" "github.com/tetafro/godot" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "godot" func New(settings *config.GodotSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var dotSettings godot.Settings if settings != nil { @@ -29,32 +24,22 @@ func New(settings *config.GodotSettings) *goanalysis.Linter { } // Convert deprecated setting - if settings.CheckAll { + if settings.CheckAll != nil && *settings.CheckAll { dotSettings.Scope = godot.AllScope } } - if dotSettings.Scope == "" { - dotSettings.Scope = godot.DeclScope - } + dotSettings.Scope = cmp.Or(dotSettings.Scope, godot.DeclScope) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runGodot(pass, dotSettings) + err := runGodot(pass, dotSettings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -64,38 +49,40 @@ func New(settings *config.GodotSettings) *goanalysis.Linter { "Check if comments end in a period", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) { - var lintIssues []godot.Issue +func runGodot(pass *analysis.Pass, settings godot.Settings) error { for _, file := range pass.Files { iss, err := godot.Run(file, pass.Fset, settings) if err != nil { - return nil, err + return err } - lintIssues = append(lintIssues, iss...) 
- } - - if len(lintIssues) == 0 { - return nil, nil - } - issues := make([]goanalysis.Issue, len(lintIssues)) - for k, i := range lintIssues { - issue := result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: linterName, - Replacement: &result.Replacement{ - NewLines: []string{i.Replacement}, - }, + if len(iss) == 0 { + continue } - issues[k] = goanalysis.NewIssue(&issue, pass) + f := pass.Fset.File(file.Pos()) + + for _, i := range iss { + start := f.Pos(i.Pos.Offset) + end := goanalysis.EndOfLinePos(f, i.Pos.Line) + + pass.Report(analysis.Diagnostic{ + Pos: start, + End: end, + Message: i.Message, + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(i.Replacement), + }}, + }}, + }) + } } - return issues, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go index d8de026ba..589789d14 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go @@ -3,73 +3,60 @@ package godox import ( "go/token" "strings" - "sync" "github.com/matoous/godox" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "godox" func New(settings *config.GodoxSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runGodox(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil + return run(pass, settings), nil }, } return goanalysis.NewLinter( linterName, - "Tool for detection of FIXME, TODO and other comment keywords", + "Detects usage of FIXME, TODO and other keywords inside comments", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue { - var messages []godox.Message +func run(pass *analysis.Pass, settings *config.GodoxSettings) error { for _, file := range pass.Files { - messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...) - } - - if len(messages) == 0 { - return nil - } - - issues := make([]goanalysis.Issue, len(messages)) - - for k, i := range messages { - issues[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: linterName, - }, pass) + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } + + messages, err := godox.Run(file, pass.Fset, settings.Keywords...) 
+ if err != nil { + return err + } + + if len(messages) == 0 { + continue + } + + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) + + ft := pass.Fset.File(file.Pos()) + + for _, i := range messages { + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(i.Pos.Line, nonAdjPosition.Line, position.Line)) + token.Pos(i.Pos.Column), + Message: strings.TrimRight(i.Message, "\n"), + }) + } } - return issues + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go index 289ceab8a..221224294 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go @@ -1,98 +1,28 @@ package gofmt import ( - "fmt" - "sync" - - gofmtAPI "github.com/golangci/gofmt/gofmt" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/goformatters" + gofmtbase "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" ) const linterName = "gofmt" func New(settings *config.GoFmtSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, - } + a := goformatters.NewAnalyzer( + internal.LinterLogger.Child(linterName), + "Checks if the code is formatted according to 'gofmt' command.", + gofmtbase.New(settings), + ) return goanalysis.NewLinter( - linterName, - "Gofmt checks whether code was gofmt-ed. By default "+ - "this tool runs with -s option to check for code simplification", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGofmt(lintCtx, pass, settings) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var rewriteRules []gofmtAPI.RewriteRule - for _, rule := range settings.RewriteRules { - rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule)) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoFmt) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoFmt(settings *config.LintersSettings) string { - text := "File is not `gofmt`-ed" - if settings.Gofmt.Simplify { - text += " with `-s`" - } - for _, rule := range settings.Gofmt.RewriteRules { - text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement) - } - - return text + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go index 3bb7df12e..878a5c79b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go @@ -1,132 +1,28 @@ package gofumpt import ( - "bytes" - "fmt" - "io" - "os" - "strings" - "sync" - - "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" - "mvdan.cc/gofumpt/format" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/goformatters" + gofumptbase "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" ) const linterName = "gofumpt" -type differ interface { - Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error -} - func New(settings *config.GofumptSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - diff := difflib.New() - - var options format.Options - - if settings != nil { - options = format.Options{ - LangVersion: getLangVersion(settings), - ModulePath: settings.ModulePath, - ExtraRules: settings.ExtraRules, - } - } - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, - } + a := goformatters.NewAnalyzer( + internal.LinterLogger.Child(linterName), + "Checks if code and import statements are formatted, with additional rules.", + gofumptbase.New(settings, settings.LangVersion), + ) return goanalysis.NewLinter( - linterName, - "Gofumpt checks whether code was gofumpt-ed.", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGofumpt(lintCtx, pass, diff, options) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return 
nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - - for _, f := range fileNames { - input, err := os.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to open file %s: %w", f, err) - } - - output, err := format.Source(input, options) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - if !bytes.Equal(input, output) { - out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - - err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - diff := out.String() - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGoFumpt) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - } - - return issues, nil -} - -func getLangVersion(settings *config.GofumptSettings) string { - if settings == nil || settings.LangVersion == "" { - // TODO: defaults to "1.15", in the future (v2) must be removed. - return "go1.15" - } - - return "go" + strings.TrimPrefix(settings.LangVersion, "go") -} - -func getIssuedTextGoFumpt(settings *config.LintersSettings) string { - text := "File is not `gofumpt`-ed" - - if settings.Gofumpt.ExtraRules { - text += " with `-extra`" - } - - return text + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go index c6b1aae6b..d24ad453e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go @@ -2,29 +2,25 @@ package goheader import ( "go/token" - "sync" + "strings" goheader "github.com/denis-tingaikin/go-header" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/golangci-lint/pkg/golinters/internal" ) const linterName = "goheader" -func New(settings *config.GoHeaderSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - +func New(settings *config.GoHeaderSettings, basePath string) *goanalysis.Linter { conf := &goheader.Configuration{} if settings != nil { conf = &goheader.Configuration{ Values: settings.Values, Template: settings.Template, - TemplatePath: settings.TemplatePath, + TemplatePath: strings.ReplaceAll(settings.TemplatePath, internal.PlaceholderBasePath, basePath), } } @@ -32,19 +28,11 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runGoHeader(pass, conf) + err := runGoHeader(pass, conf) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = 
append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -54,62 +42,91 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { "Checks if file header matches to pattern", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { +func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error { if conf.TemplatePath == "" && conf.Template == "" { // User did not pass template, so then do not run go-header linter - return nil, nil + return nil } template, err := conf.GetTemplate() if err != nil { - return nil, err + return err } values, err := conf.GetValues() if err != nil { - return nil, err + return err } a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) - var issues []goanalysis.Issue for _, file := range pass.Files { - path := pass.Fset.Position(file.Pos()).Filename + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } + + issue := a.Analyze(&goheader.Target{File: file, Path: position.Filename}) + if issue == nil { + continue + } - i := a.Analyze(&goheader.Target{File: file, Path: path}) + f := pass.Fset.File(file.Pos()) - if i == nil { + commentLine := 1 + var offset int + + // Inspired by https://github.com/denis-tingaikin/go-header/blob/4c75a6a2332f025705325d6c71fff4616aedf48f/analyzer.go#L85-L92 + if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package { + if !strings.HasPrefix(file.Comments[0].List[0].Text, "/*") { + // When the comment is "//" there is a one character offset. + offset = 1 + } + commentLine = goanalysis.GetFilePositionFor(pass.Fset, file.Comments[0].Pos()).Line + } + + // Skip issues related to build directives. + // https://github.com/denis-tingaikin/go-header/issues/18 + if issue.Location().Position-offset < 0 { continue } - issue := result.Issue{ - Pos: token.Position{ - Line: i.Location().Line + 1, - Column: i.Location().Position, - Filename: path, - }, - Text: i.Message(), - FromLinter: linterName, + diag := analysis.Diagnostic{ + Pos: f.LineStart(issue.Location().Line+1) + token.Pos(issue.Location().Position-offset), // The position of the first divergence. + Message: issue.Message(), } - if fix := i.Fix(); fix != nil { - issue.LineRange = &result.Range{ - From: issue.Line(), - To: issue.Line() + len(fix.Actual) - 1, + if fix := issue.Fix(); fix != nil { + current := len(fix.Actual) + for _, s := range fix.Actual { + current += len(s) } - issue.Replacement = &result.Replacement{ - NeedOnlyDelete: len(fix.Expected) == 0, - NewLines: fix.Expected, + + start := f.LineStart(commentLine) + + end := start + token.Pos(current) + + header := strings.Join(fix.Expected, "\n") + "\n" + + // Adds an extra line between the package and the header. 
+ if end == file.Package { + header += "\n" } + + diag.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(header), + }}, + }} } - issues = append(issues, goanalysis.NewIssue(&issue, pass)) + pass.Report(diag) } - return issues, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go index de965d5c8..d7ba98559 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go @@ -1,94 +1,28 @@ package goimports import ( - "fmt" - "sync" - - goimportsAPI "github.com/golangci/gofmt/goimports" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/imports" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/goformatters" + goimportsbase "github.com/golangci/golangci-lint/pkg/goformatters/goimports" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" ) const linterName = "goimports" func New(settings *config.GoImportsSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, - } + a := goformatters.NewAnalyzer( + internal.LinterLogger.Child(linterName), + "Checks if the code and import statements are formatted according to the 'goimports' command.", + goimportsbase.New(settings), + ) return goanalysis.NewLinter( - linterName, - "Check import statements are formatted according to the 'goimport' command. "+ - "Reformat imports in autofix mode.", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - imports.LocalPrefix = settings.LocalPrefixes - - analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGoImports(lintCtx, pass) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := goimportsAPI.Run(f) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoImports) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoImports(settings *config.LintersSettings) string { - text := "File is not `goimports`-ed" - - if settings.Goimports.LocalPrefixes != "" { - text += " with -local " + settings.Goimports.LocalPrefixes - } - - return text + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go index 9cde7e26c..f8f47ba2b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go @@ -1,6 +1,7 @@ package gomoddirectives import ( + "regexp" "sync" "github.com/ldez/gomoddirectives" @@ -8,6 +9,7 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) @@ -24,6 +26,27 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { opts.ReplaceAllowList = settings.ReplaceAllowList opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation opts.ExcludeForbidden = settings.ExcludeForbidden + opts.ToolchainForbidden = settings.ToolchainForbidden + opts.ToolForbidden = settings.ToolForbidden + opts.GoDebugForbidden = settings.GoDebugForbidden + + if settings.ToolchainPattern != "" { + exp, err := regexp.Compile(settings.ToolchainPattern) + if err != nil { + internal.LinterLogger.Fatalf("%s: invalid toolchain pattern: %v", linterName, err) + } else { + opts.ToolchainPattern = exp + } + } + + if settings.GoVersionPattern != "" { + exp, err := regexp.Compile(settings.GoVersionPattern) + if err != nil { + internal.LinterLogger.Fatalf("%s: invalid Go version pattern: %v", linterName, err) + } else { + opts.GoVersionPattern = exp + } + } } analyzer := &analysis.Analyzer{ @@ -40,7 +63,7 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) { once.Do(func() { - results, err := gomoddirectives.Analyze(opts) + results, err := gomoddirectives.AnalyzePass(pass, opts) if err != nil { lintCtx.Log.Warnf("running %s failed: %s: "+ "if you are not using go modules it is suggested to disable this linter", linterName, err) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go index 
8f1036b0f..8bddebc16 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go @@ -73,7 +73,7 @@ func New(settings *config.GoModGuardSettings) *goanalysis.Linter { } analyzer.Run = func(pass *analysis.Pass) (any, error) { - gomodguardIssues := processor.ProcessFiles(internal.GetFileNames(pass)) + gomodguardIssues := processor.ProcessFiles(internal.GetGoFileNames(pass)) mu.Lock() defer mu.Unlock() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go index a5367399b..6b46beacc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go @@ -184,7 +184,15 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { } for k, v := range globalOptionMap { - conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v)) + option := gosec.GlobalOption(k) + + // Set nosec global option only if the value is true + // https://github.com/securego/gosec/blob/v2.21.4/analyzer.go#L572 + if option == gosec.Nosec && v == false { + continue + } + + conf.SetGlobal(option, fmt.Sprintf("%v", v)) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go index 4f6fb8035..bf9b19f12 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go @@ -10,16 +10,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(s *config.GosmopolitanSettings) *goanalysis.Linter { +func New(settings *config.GosmopolitanSettings) *goanalysis.Linter { a := gosmopolitan.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if s != nil { - cfgMap[a.Name] = map[string]any{ - "allowtimelocal": s.AllowTimeLocal, - "escapehatches": strings.Join(s.EscapeHatches, ","), - "lookattests": !s.IgnoreTests, - "watchforscripts": strings.Join(s.WatchForScripts, ","), + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "allowtimelocal": settings.AllowTimeLocal, + "escapehatches": strings.Join(settings.EscapeHatches, ","), + "lookattests": !settings.IgnoreTests, + "watchforscripts": strings.Join(settings.WatchForScripts, ","), } } @@ -27,6 +27,6 @@ func New(s *config.GosmopolitanSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go index eb63a5d33..b970e4039 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go @@ -2,7 +2,7 @@ package govet import ( "slices" - "sort" + "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/appends" @@ -40,6 +40,7 @@ import ( "golang.org/x/tools/go/analysis/passes/slog" "golang.org/x/tools/go/analysis/passes/sortslice" "golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/stdversion" "golang.org/x/tools/go/analysis/passes/stringintconv" 
"golang.org/x/tools/go/analysis/passes/structtag" "golang.org/x/tools/go/analysis/passes/testinggoroutine" @@ -50,6 +51,7 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/unusedwrite" + "golang.org/x/tools/go/analysis/passes/waitgroup" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" @@ -89,6 +91,7 @@ var ( slog.Analyzer, sortslice.Analyzer, stdmethods.Analyzer, + stdversion.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -99,9 +102,10 @@ var ( unsafeptr.Analyzer, unusedresult.Analyzer, unusedwrite.Analyzer, + waitgroup.Analyzer, } - // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81 + // https://github.com/golang/go/blob/go1.23.0/src/cmd/vet/main.go#L55-L87 defaultAnalyzers = []*analysis.Analyzer{ appends.Analyzer, asmdecl.Analyzer, @@ -126,6 +130,7 @@ var ( sigchanyzer.Analyzer, slog.Analyzer, stdmethods.Analyzer, + stdversion.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -159,8 +164,8 @@ func New(settings *config.GovetSettings) *goanalysis.Linter { } func analyzersFromConfig(settings *config.GovetSettings) []*analysis.Analyzer { - debugAnalyzersListf(allAnalyzers, "All available analyzers") - debugAnalyzersListf(defaultAnalyzers, "Default analyzers") + logAnalyzers("All available analyzers", allAnalyzers) + logAnalyzers("Default analyzers", defaultAnalyzers) if settings == nil { return defaultAnalyzers @@ -173,7 +178,7 @@ func analyzersFromConfig(settings *config.GovetSettings) []*analysis.Analyzer { } } - debugAnalyzersListf(enabledAnalyzers, "Enabled by config analyzers") + logAnalyzers("Enabled by config analyzers", enabledAnalyzers) return enabledAnalyzers } @@ -185,7 +190,7 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } // Keeping for backward compatibility. 
- if cfg.CheckShadowing && name == shadow.Analyzer.Name { + if cfg.CheckShadowing != nil && *cfg.CheckShadowing && name == shadow.Analyzer.Name { return true } @@ -207,7 +212,7 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } } -func debugAnalyzersListf(analyzers []*analysis.Analyzer, message string) { +func logAnalyzers(message string, analyzers []*analysis.Analyzer) { if !isDebug { return } @@ -217,7 +222,7 @@ func debugAnalyzersListf(analyzers []*analysis.Analyzer, message string) { analyzerNames = append(analyzerNames, a.Name) } - sort.Strings(analyzerNames) + slices.Sort(analyzerNames) - debugf("%s (%d): %s", message, len(analyzerNames), analyzerNames) + debugf("%s (%d): %s", message, len(analyzerNames), strings.Join(analyzerNames, ", ")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go index aa6ce1ceb..e0a3f794a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go @@ -11,9 +11,9 @@ import ( func New(settings *config.GrouperSettings) *goanalysis.Linter { a := grouper.New() - linterCfg := map[string]map[string]any{} + cfg := map[string]map[string]any{} if settings != nil { - linterCfg[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "const-require-single-const": settings.ConstRequireSingleConst, "const-require-grouping": settings.ConstRequireGrouping, "import-require-single-import": settings.ImportRequireSingleImport, @@ -29,6 +29,6 @@ func New(settings *config.GrouperSettings) *goanalysis.Linter { a.Name, "Analyze expression groups.", []*analysis.Analyzer{a}, - linterCfg, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go index 45117c9a4..b7c6c35ae 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go @@ -51,8 +51,11 @@ func New(settings *config.ImportAsSettings) *goanalysis.Linter { uniqPackages[a.Pkg] = a } - // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`). - if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { + // Skips the duplication check when: + // - the alias is empty. + // - the alias is a regular expression replacement pattern (ie. contains `$`). + v, ok := uniqAliases[a.Alias] + if ok && a.Alias != "" && !strings.Contains(a.Alias, "$") { lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) } else { uniqAliases[a.Alias] = a diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/commons.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/commons.go index c21dd0092..ebb0b13a0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/commons.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/commons.go @@ -4,3 +4,12 @@ import "github.com/golangci/golangci-lint/pkg/logutils" // LinterLogger must be use only when the context logger is not available. var LinterLogger = logutils.NewStderrLog(logutils.DebugKeyLinter) + +// Placeholders used inside linters to evaluate relative paths. 
+const ( + PlaceholderBasePath = "${base-path}" + // Deprecated: it must be removed in v2. + // [PlaceholderBasePath] will be the only one placeholder as it is a dynamic value based on + // [github.com/golangci/golangci-lint/pkg/config.Run.RelativePathMode]. + PlaceholderConfigDir = "${configDir}" +) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go deleted file mode 100644 index f919c5b2a..000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go +++ /dev/null @@ -1,264 +0,0 @@ -package internal - -import ( - "bytes" - "fmt" - "go/token" - "strings" - - diffpkg "github.com/sourcegraph/go-diff/diff" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" -) - -type Change struct { - LineRange result.Range - Replacement result.Replacement -} - -type diffLineType string - -const ( - diffLineAdded diffLineType = "added" - diffLineOriginal diffLineType = "original" - diffLineDeleted diffLineType = "deleted" -) - -type fmtTextFormatter func(settings *config.LintersSettings) string - -type diffLine struct { - originalNumber int // 1-based original line number - typ diffLineType - data string // "+" or "-" stripped line -} - -type hunkChangesParser struct { - // needed because we merge currently added lines with the last original line - lastOriginalLine *diffLine - - // if the first line of diff is an adding we save all additions to replacementLinesToPrepend - replacementLinesToPrepend []string - - log logutils.Log - - lines []diffLine - - ret []Change -} - -func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { - lines := bytes.Split(h.Body, []byte{'\n'}) - currentOriginalLineNumber := int(h.OrigStartLine) - var ret []diffLine - - for i, line := range lines { - dl := diffLine{ - originalNumber: currentOriginalLineNumber, - } - - lineStr := string(line) - - if strings.HasPrefix(lineStr, "-") { - dl.typ = diffLineDeleted - dl.data = strings.TrimPrefix(lineStr, "-") - currentOriginalLineNumber++ - } else if strings.HasPrefix(lineStr, "+") { - dl.typ = diffLineAdded - dl.data = strings.TrimPrefix(lineStr, "+") - } else { - if i == len(lines)-1 && lineStr == "" { - // handle last \n: don't add an empty original line - break - } - - dl.typ = diffLineOriginal - dl.data = strings.TrimPrefix(lineStr, " ") - currentOriginalLineNumber++ - } - - ret = append(ret, dl) - } - - // if > 0, then the original file had a 'No newline at end of file' mark - if h.OrigNoNewlineAt > 0 { - dl := diffLine{ - originalNumber: currentOriginalLineNumber + 1, - typ: diffLineAdded, - data: "", - } - ret = append(ret, dl) - } - - p.lines = ret -} - -func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { - if len(p.replacementLinesToPrepend) == 0 { - p.lastOriginalLine = &line - *i++ - return - } - - // check following added lines for the case: - // + added line 1 - // original line - // + added line 2 - - *i++ - var followingAddedLines []string - for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ { - followingAddedLines = append(followingAddedLines, p.lines[*i].data) - } - - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: line.originalNumber, - To: line.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, 
followingAddedLines...)...), - }, - }) - p.replacementLinesToPrepend = nil - p.lastOriginalLine = &line -} - -func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { - change := Change{ - LineRange: result.Range{ - From: deletedLines[0].originalNumber, - To: deletedLines[len(deletedLines)-1].originalNumber, - }, - } - - if len(addedLines) != 0 { - change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...) - change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...) - if len(p.replacementLinesToPrepend) != 0 { - p.replacementLinesToPrepend = nil - } - - p.ret = append(p.ret, change) - return - } - - // delete-only change with possible prepending - if len(p.replacementLinesToPrepend) != 0 { - change.Replacement.NewLines = p.replacementLinesToPrepend - p.replacementLinesToPrepend = nil - } else { - change.Replacement.NeedOnlyDelete = true - } - - p.ret = append(p.ret, change) -} - -func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { - if p.lastOriginalLine == nil { - // the first line is added; the diff looks like: - // 1. + ... - // 2. - ... - // or - // 1. + ... - // 2. ... - - p.replacementLinesToPrepend = addedLines - return - } - - // add-only change merged into the last original line with possible prepending - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: p.lastOriginalLine.originalNumber, - To: p.lastOriginalLine.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...), - }, - }) - p.replacementLinesToPrepend = nil -} - -func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { - p.parseDiffLines(h) - - for i := 0; i < len(p.lines); { - line := p.lines[i] - if line.typ == diffLineOriginal { - p.handleOriginalLine(line, &i) - continue - } - - var deletedLines []diffLine - for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ { - deletedLines = append(deletedLines, p.lines[i]) - } - - var addedLines []string - for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ { - addedLines = append(addedLines, p.lines[i].data) - } - - if len(deletedLines) != 0 { - p.handleDeletedLines(deletedLines, addedLines) - continue - } - - // no deletions, only additions - p.handleAddedOnlyLines(addedLines) - } - - if len(p.replacementLinesToPrepend) != 0 { - p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines) - return nil - } - - return p.ret -} - -func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string, formatter fmtTextFormatter) ([]result.Issue, error) { - diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) - if err != nil { - return nil, fmt.Errorf("can't parse patch: %w", err) - } - - if len(diffs) == 0 { - return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) - } - - var issues []result.Issue - for _, d := range diffs { - if len(d.Hunks) == 0 { - lintCtx.Log.Warnf("Got no hunks in diff %+v", d) - continue - } - - for _, hunk := range d.Hunks { - p := hunkChangesParser{log: lintCtx.Log} - - changes := p.parse(hunk) - - for _, change := range changes { - i := result.Issue{ - FromLinter: linterName, - Pos: token.Position{ - Filename: d.NewName, - Line: change.LineRange.From, - }, - Text: formatter(lintCtx.Settings()), - Replacement: &change.Replacement, - } - if change.LineRange.From != change.LineRange.To { - i.LineRange = &change.LineRange - } - - issues = 
append(issues, i) - } - } - } - - return issues, nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go index 80b194dd2..7525f2f2c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go @@ -2,12 +2,12 @@ package internal import ( "fmt" - "path/filepath" "strings" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" ) func FormatCode(code string, _ *config.Config) string { @@ -18,16 +18,17 @@ func FormatCode(code string, _ *config.Config) string { return fmt.Sprintf("`%s`", code) } -func GetFileNames(pass *analysis.Pass) []string { - var fileNames []string +func GetGoFileNames(pass *analysis.Pass) []string { + var filenames []string + for _, f := range pass.Files { - fileName := pass.Fset.PositionFor(f.Pos(), true).Filename - ext := filepath.Ext(fileName) - if ext != "" && ext != ".go" { - // position has been adjusted to a non-go file, revert to original file - fileName = pass.Fset.PositionFor(f.Pos(), false).Filename + position, b := goanalysis.GetGoFilePosition(pass, f) + if !b { + continue } - fileNames = append(fileNames, fileName) + + filenames = append(filenames, position.Filename) } - return fileNames + + return filenames } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go index 67f89eecb..bad3b0c4e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go @@ -4,19 +4,15 @@ import ( "bufio" "errors" "fmt" - "go/token" + "go/ast" "os" "strings" - "sync" "unicode/utf8" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "lll" @@ -24,26 +20,15 @@ const linterName = "lll" const goCommentDirectivePrefix = "//go:" func New(settings *config.LllSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runLll(pass, settings) + err := runLll(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil }, } @@ -53,40 +38,39 @@ func New(settings *config.LllSettings) *goanalysis.Linter { "Reports long lines", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - +func runLll(pass *analysis.Pass, settings *config.LllSettings) error { spaces := strings.Repeat(" ", settings.TabWidth) - var issues []goanalysis.Issue - for _, f := range fileNames { - lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) + for _, file := range pass.Files { + err := getLLLIssuesForFile(pass, file, settings.LineLength, spaces) if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + return err } } - return issues, nil + return nil } -func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { - var res []result.Issue +func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, tabSpaces string) error { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + return nil + } + + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) - f, err := os.Open(filename) + f, err := os.Open(position.Filename) if err != nil { - return nil, fmt.Errorf("can't open file %s: %w", filename, err) + return fmt.Errorf("can't open file %s: %w", position.Filename, err) } + defer f.Close() + ft := pass.Fset.File(file.Pos()) + lineNumber := 0 multiImportEnabled := false @@ -116,42 +100,34 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { - res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - }, - Text: fmt.Sprintf("the line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), - FromLinter: linterName, + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), + Message: fmt.Sprintf("The line is %d characters long, which exceeds the maximum of %d characters.", + lineLen, maxLineLen), }) } } if err := scanner.Err(); err != nil { + // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize + // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize + // we can return this line as a long line instead of returning an error. + // The reason for this change is that this case might happen with autogenerated files + // The go-bindata tool for instance might generate a file with a very long line. + // In this case, as it's an auto generated file, the warning returned by lll will + // be ignored. + // But if we return a linter error here, and this error happens for an autogenerated + // file the error will be discarded (fine), but all the subsequent errors for lll will + // be discarded for other files, and we'll miss legit error. if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { - // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize - // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize - // we can return this line as a long line instead of returning an error. 
- // The reason for this change is that this case might happen with autogenerated files - // The go-bindata tool for instance might generate a file with a very long line. - // In this case, as it's an auto generated file, the warning returned by lll will - // be ignored. - // But if we return a linter error here, and this error happens for an autogenerated - // file the error will be discarded (fine), but all the subsequent errors for lll will - // be discarded for other files, and we'll miss legit error. - res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - Column: 1, - }, - Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), - FromLinter: linterName, + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), + Message: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), }) } else { - return nil, fmt.Errorf("can't scan file %s: %w", filename, err) + return fmt.Errorf("can't scan file %s: %w", position.Filename, err) } } - return res, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go index 077e8a512..84c8d7363 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go @@ -22,6 +22,9 @@ func New(settings *config.LoggerCheckSettings) *goanalysis.Linter { if !settings.Logr { disable = append(disable, "logr") } + if !settings.Slog { + disable = append(disable, "slog") + } if !settings.Zap { disable = append(disable, "zap") } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go index 08f12369e..799c51c87 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go @@ -8,16 +8,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { +func New(settings *config.MaintIdxSettings) *goanalysis.Linter { analyzer := maintidx.Analyzer - cfgMap := map[string]map[string]any{ + cfg := map[string]map[string]any{ analyzer.Name: {"under": 20}, } - if cfg != nil { - cfgMap[analyzer.Name] = map[string]any{ - "under": cfg.Under, + if settings != nil { + cfg[analyzer.Name] = map[string]any{ + "under": settings.Under, } } @@ -25,6 +25,6 @@ func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go index ae4bf2184..b5ab4515e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go @@ -2,40 +2,26 @@ package makezero import ( "fmt" - "sync" "github.com/ashanbrown/makezero/makezero" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - 
"github.com/golangci/golangci-lint/pkg/result" ) const linterName = "makezero" func New(settings *config.MakezeroSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runMakeZero(pass, settings) + err := runMakeZero(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -45,30 +31,25 @@ func New(settings *config.MakezeroSettings) *goanalysis.Linter { "Finds slice declarations with non-zero initial length", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { +func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) error { zero := makezero.NewLinter(settings.Always) - var issues []goanalysis.Issue - for _, file := range pass.Files { hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) if err != nil { - return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) + return fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) } for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos(), + Message: hint.Details(), + }) } } - return issues, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go index 34b880b52..e15dfa3a5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go @@ -1,70 +1,30 @@ package mirror import ( - "sync" - "github.com/butuzov/mirror" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New() *goanalysis.Linter { - var ( - mu sync.Mutex - issues []goanalysis.Issue - ) - a := mirror.NewAnalyzer() - a.Run = func(pass *analysis.Pass) (any, error) { - // mirror only lints test files if the `--with-tests` flag is passed, - // so we pass the `with-tests` flag as true to the analyzer before running it. - // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` - // or can be disabled per linter via exclude rules. 
- // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) - violations := mirror.Run(pass, true) - - if len(violations) == 0 { - return nil, nil - } - - for index := range violations { - i := violations[index].Issue(pass.Fset) - issue := result.Issue{ - FromLinter: a.Name, - Text: i.Message, - Pos: i.Start, - } - - if i.InlineFix != "" { - issue.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: i.Start.Column - 1, - Length: len(i.Original), - NewString: i.InlineFix, - }, - } - } - - mu.Lock() - issues = append(issues, goanalysis.NewIssue(&issue, pass)) - mu.Unlock() - } - - return nil, nil + // mirror only lints test files if the `--with-tests` flag is passed, + // so we pass the `with-tests` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` + // or can be disabled per linter via exclude rules. + // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) + linterCfg := map[string]map[string]any{ + a.Name: { + "with-tests": true, + }, } - analyzer := goanalysis.NewLinter( + return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, - nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) - - return analyzer + linterCfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go index 44409cec9..9d19780ac 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go @@ -2,9 +2,9 @@ package misspell import ( "fmt" + "go/ast" "go/token" "strings" - "sync" "unicode" "github.com/golangci/misspell" @@ -13,71 +13,37 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "misspell" func New(settings *config.MisspellSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: goanalysis.DummyRun, + replacer, err := createMisspellReplacer(settings) + if err != nil { + internal.LinterLogger.Fatalf("%s: %v", linterName, err) } - return goanalysis.NewLinter( - linterName, - "Finds commonly misspelled English words", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - replacer, ruleErr := createMisspellReplacer(settings) - - analyzer.Run = func(pass *analysis.Pass) (any, error) { - if ruleErr != nil { - return nil, ruleErr - } - - issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil + a := &analysis.Analyzer{ + Name: linterName, + Doc: "Finds commonly misspelled English words", + Run: func(pass *analysis.Pass) (any, error) { + for _, file := range pass.Files { + err := runMisspellOnFile(pass, file, replacer, settings.Mode) + if err != nil { + return nil, err + } } - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - for _, filename := range fileNames { - lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) - if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) - } + }, } - return issues, nil + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) } func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { @@ -112,10 +78,17 @@ func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replac return replacer, nil } -func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { - fileContent, err := lintCtx.FileCache.GetFileBytes(filename) +func runMisspellOnFile(pass *analysis.Pass, file *ast.File, replacer *misspell.Replacer, mode string) error { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + return nil + } + + // Uses the non-adjusted file to work with cgo: + // if we read the real file, the positions are wrong in some cases. + fileContent, err := pass.ReadFile(pass.Fset.PositionFor(file.Pos(), false).Filename) if err != nil { - return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) + return fmt.Errorf("can't get file %s contents: %w", position.Filename, err) } // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. 
@@ -129,36 +102,31 @@ func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *missp replace = replacer.Replace } - _, diffs := replace(string(fileContent)) + f := pass.Fset.File(file.Pos()) - var res []result.Issue + _, diffs := replace(string(fileContent)) for _, diff := range diffs { text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) - pos := token.Position{ - Filename: filename, - Line: diff.Line, - Column: diff.Column + 1, - } - - replacement := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: diff.Column, - Length: len(diff.Original), - NewString: diff.Corrected, - }, - } - - res = append(res, result.Issue{ - Pos: pos, - Text: text, - FromLinter: linterName, - Replacement: replacement, + start := f.LineStart(diff.Line) + token.Pos(diff.Column) + end := f.LineStart(diff.Line) + token.Pos(diff.Column+len(diff.Original)) + + pass.Report(analysis.Diagnostic{ + Pos: start, + End: end, + Message: text, + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(diff.Corrected), + }}, + }}, }) } - return res, nil + return nil } func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go index 30047abfc..a4e9ceff2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go @@ -8,11 +8,11 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(setting *config.MustTagSettings) *goanalysis.Linter { +func New(settings *config.MustTagSettings) *goanalysis.Linter { var funcs []musttag.Func - if setting != nil { - for _, fn := range setting.Functions { + if settings != nil { + for _, fn := range settings.Functions { funcs = append(funcs, musttag.Func{ Name: fn.Name, Tag: fn.Tag, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go index 43be973b0..b72538fd1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go @@ -1,37 +1,21 @@ package nestif import ( - "sort" - "sync" - "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "nestif" func New(settings *config.NestifSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runNestIf(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() + runNestIf(pass, settings) return nil, nil }, @@ -42,37 +26,34 @@ func New(settings *config.NestifSettings) *goanalysis.Linter { "Reports deeply nested if statements", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { +func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) { checker := &nestif.Checker{ MinComplexity: settings.MinComplexity, } - var lintIssues []nestif.Issue - for _, f := range pass.Files { - lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) - } + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } - if len(lintIssues) == 0 { - return nil - } + issues := checker.Check(file, pass.Fset) + if len(issues) == 0 { + continue + } - sort.SliceStable(lintIssues, func(i, j int) bool { - return lintIssues[i].Complexity > lintIssues[j].Complexity - }) + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) - issues := make([]goanalysis.Issue, 0, len(lintIssues)) - for _, i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: linterName, - }, pass)) - } + f := pass.Fset.File(file.Pos()) - return issues + for _, issue := range issues { + pass.Report(analysis.Diagnostic{ + Pos: f.LineStart(goanalysis.AdjustPos(issue.Pos.Line, nonAdjPosition.Line, position.Line)), + Message: issue.Message, + }) + } + } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go new file mode 100644 index 000000000..8349377b7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go @@ -0,0 +1,23 @@ +package nilnesserr + +import ( + "github.com/alingse/nilnesserr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" +) + +func New() *goanalysis.Linter { + a, err := nilnesserr.NewAnalyzer(nilnesserr.LinterSetting{}) + if err != nil { + internal.LinterLogger.Fatalf("nilnesserr: create analyzer: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go index d8d677d99..ed25dec71 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go @@ -11,13 +11,13 @@ import ( func New(settings *config.NilNilSettings) *goanalysis.Linter { a := analyzer.New() - cfgMap := make(map[string]map[string]any) + cfg := make(map[string]map[string]any) if settings != nil { - cfgMap[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "detect-opposite": settings.DetectOpposite, } if len(settings.CheckedTypes) != 0 { - cfgMap[a.Name]["checked-types"] = settings.CheckedTypes + cfg[a.Name]["checked-types"] = settings.CheckedTypes } } @@ -25,7 +25,7 @@ func New(settings *config.NilNilSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ). 
WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go new file mode 100644 index 000000000..5e9ba4117 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go @@ -0,0 +1,41 @@ +package internal + +import ( + "fmt" + "strings" + "unicode" +) + +func formatExtraLeadingSpace(fullDirective string) string { + return fmt.Sprintf("directive `%s` should not have more than one leading space", fullDirective) +} + +func formatNotMachine(fullDirective string) string { + expected := fullDirective[:2] + strings.TrimLeftFunc(fullDirective[2:], unicode.IsSpace) + return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", + fullDirective, expected) +} + +func formatNotSpecific(fullDirective, directiveWithOptionalLeadingSpace string) string { + return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", + fullDirective, directiveWithOptionalLeadingSpace) +} + +func formatParseError(fullDirective, directiveWithOptionalLeadingSpace string) string { + return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", + fullDirective, + directiveWithOptionalLeadingSpace) +} + +func formatNoExplanation(fullDirective, fullDirectiveWithoutExplanation string) string { + return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", + fullDirective, fullDirectiveWithoutExplanation) +} + +func formatUnusedCandidate(fullDirective, expectedLinter string) string { + details := fmt.Sprintf("directive `%s` is unused", fullDirective) + if expectedLinter != "" { + details += fmt.Sprintf(" for linter %q", expectedLinter) + } + return details +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go index 08dd74378..21cd20124 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go @@ -2,123 +2,17 @@ package internal import ( - "fmt" - "go/ast" "go/token" "regexp" "strings" - "unicode" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/result" ) -type BaseIssue struct { - fullDirective string - directiveWithOptionalLeadingSpace string - position token.Position - replacement *result.Replacement -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Position() token.Position { - return b.position -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Replacement() *result.Replacement { - return b.replacement -} - -type ExtraLeadingSpace struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ExtraLeadingSpace) Details() string { - return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) -} - -func (i ExtraLeadingSpace) String() string { return toString(i) } - -type NotMachine struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. 
-func (i NotMachine) Details() string { - expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) - return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", - i.fullDirective, expected) -} - -func (i NotMachine) String() string { return toString(i) } - -type NotSpecific struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NotSpecific) Details() string { - return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", - i.fullDirective, i.directiveWithOptionalLeadingSpace) -} - -func (i NotSpecific) String() string { return toString(i) } - -type ParseError struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ParseError) Details() string { - return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", - i.fullDirective, - i.directiveWithOptionalLeadingSpace) -} - -func (i ParseError) String() string { return toString(i) } - -type NoExplanation struct { - BaseIssue - fullDirectiveWithoutExplanation string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NoExplanation) Details() string { - return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", - i.fullDirective, i.fullDirectiveWithoutExplanation) -} - -func (i NoExplanation) String() string { return toString(i) } - -type UnusedCandidate struct { - BaseIssue - ExpectedLinter string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i UnusedCandidate) Details() string { - details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) - if i.ExpectedLinter != "" { - details += fmt.Sprintf(" for linter %q", i.ExpectedLinter) - } - return details -} - -func (i UnusedCandidate) String() string { return toString(i) } - -func toString(issue Issue) string { - return fmt.Sprintf("%s at %s", issue.Details(), issue.Position()) -} - -type Issue interface { - Details() string - Position() token.Position - String() string - Replacement() *result.Replacement -} - -type Needs uint +const LinterName = "nolintlint" const ( NeedsMachineOnly Needs = 1 << iota @@ -128,6 +22,10 @@ const ( NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation ) +type Needs uint + +const commentMark = "//" + var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`) // matches a complete nolint directive @@ -157,15 +55,10 @@ var ( ) //nolint:funlen,gocyclo // the function is going to be refactored in the future -func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { - var issues []Issue - - for _, node := range nodes { - file, ok := node.(*ast.File) - if !ok { - continue - } +func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { + var issues []goanalysis.Issue + for _, file := range pass.Files { for _, c := range file.Comments { for _, comment := range c.List { if !commentPattern.MatchString(comment.Text) { @@ -180,47 +73,58 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { leadingSpace = leadingSpaceMatches[1] } - directiveWithOptionalLeadingSpace := "//" + directiveWithOptionalLeadingSpace := commentMark if leadingSpace != "" { directiveWithOptionalLeadingSpace += " " } - split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], commentMark) directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) - pos := 
fset.Position(comment.Pos()) - end := fset.Position(comment.End()) - - base := BaseIssue{ - fullDirective: comment.Text, - directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace, - position: pos, - } + pos := pass.Fset.Position(comment.Pos()) + end := pass.Fset.Position(comment.End()) // check for, report and eliminate leading spaces, so we can check for other issues if leadingSpace != "" { - removeWhitespace := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: pos.Column + 1, - Length: len(leadingSpace), - NewString: "", - }, - } + removeWhitespace := []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos.Offset), + End: token.Pos(pos.Offset + len(commentMark) + len(leadingSpace)), + NewText: []byte(commentMark), + }}, + }} + if (l.needs & NeedsMachineOnly) != 0 { - issue := NotMachine{BaseIssue: base} - issue.BaseIssue.replacement = removeWhitespace - issues = append(issues, issue) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNotMachine(comment.Text), + Pos: pos, + SuggestedFixes: removeWhitespace, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } else if len(leadingSpace) > 1 { - issue := ExtraLeadingSpace{BaseIssue: base} - issue.BaseIssue.replacement = removeWhitespace - issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended - issues = append(issues, issue) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatExtraLeadingSpace(comment.Text), + Pos: pos, + SuggestedFixes: removeWhitespace, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text) if len(fullMatches) == 0 { - issues = append(issues, ParseError{BaseIssue: base}) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatParseError(comment.Text, directiveWithOptionalLeadingSpace), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) + continue } @@ -230,7 +134,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if lintersText != "" && !strings.HasPrefix(lintersText, "all") { lls := strings.Split(lintersText, ",") linters = make([]string, 0, len(lls)) - rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") + rangeStart := (pos.Column - 1) + len(commentMark) + len(leadingSpace) + len("nolint:") for i, ll := range lls { rangeEnd := rangeStart + len(ll) if i < len(lls)-1 { @@ -246,46 +150,59 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if (l.needs & NeedsSpecific) != 0 { if len(linters) == 0 { - issues = append(issues, NotSpecific{BaseIssue: base}) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNotSpecific(comment.Text, directiveWithOptionalLeadingSpace), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } // when detecting unused directives, we send all the directives through and filter them out in the nolint processor if (l.needs & NeedsUnused) != 0 { - removeNolintCompletely := &result.Replacement{} + removeNolintCompletely := []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos.Offset), + End: token.Pos(end.Offset), + NewText: nil, + }}, + }} - startCol := pos.Column - 1 - - if startCol == 0 { - // if the directive starts from a new line, remove the line - removeNolintCompletely.NeedOnlyDelete = true - } else { - removeNolintCompletely.Inline = &result.InlineFix{ - StartCol: startCol, - Length: end.Column 
- pos.Column, - NewString: "", + if len(linters) == 0 { + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatUnusedCandidate(comment.Text, ""), + Pos: pos, + ExpectNoLint: true, + SuggestedFixes: removeNolintCompletely, } - } - if len(linters) == 0 { - issue := UnusedCandidate{BaseIssue: base} - issue.replacement = removeNolintCompletely - issues = append(issues, issue) + issues = append(issues, goanalysis.NewIssue(issue, pass)) } else { for _, linter := range linters { - issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter} - // only offer replacement if there is a single linter + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatUnusedCandidate(comment.Text, linter), + Pos: pos, + ExpectNoLint: true, + ExpectedNoLintLinter: linter, + } + + // only offer SuggestedFix if there is a single linter // because of issues around commas and the possibility of all // linters being removed if len(linters) == 1 { - issue.replacement = removeNolintCompletely + issue.SuggestedFixes = removeNolintCompletely } - issues = append(issues, issue) + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } } - if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { + if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == commentMark) { needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation // otherwise, check if we are excluding all the mentioned linters for _, ll := range linters { @@ -297,10 +214,14 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if needsExplanation { fullDirectiveWithoutExplanation := trailingBlankExplanation.ReplaceAllString(comment.Text, "") - issues = append(issues, NoExplanation{ - BaseIssue: base, - fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation, - }) + + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNoExplanation(comment.Text, fullDirectiveWithoutExplanation), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go index 9f04454a5..e1c878628 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -2,31 +2,47 @@ package nolintlint import ( "fmt" - "go/ast" "sync" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal" + nolintlint "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const LinterName = "nolintlint" +const LinterName = nolintlint.LinterName func New(settings *config.NoLintLintSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue + var needs nolintlint.Needs + if settings.RequireExplanation { + needs |= nolintlint.NeedsExplanation + } + if settings.RequireSpecific { + needs |= nolintlint.NeedsSpecific + } + if !settings.AllowUnused { + needs |= nolintlint.NeedsUnused + } + + lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + if err != nil { 
+ internal.LinterLogger.Fatalf("%s: create analyzer: %v", nolintlint.LinterName, err) + } + analyzer := &analysis.Analyzer{ - Name: LinterName, + Name: nolintlint.LinterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runNoLintLint(pass, settings) + issues, err := lnt.Run(pass) if err != nil { - return nil, err + return nil, fmt.Errorf("linter failed to run: %w", err) } if len(issues) == 0 { @@ -42,7 +58,7 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { } return goanalysis.NewLinter( - LinterName, + nolintlint.LinterName, "Reports ill-formed or insufficient nolint directives", []*analysis.Analyzer{analyzer}, nil, @@ -50,55 +66,3 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } - -func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) { - var needs internal.Needs - if settings.RequireExplanation { - needs |= internal.NeedsExplanation - } - if settings.RequireSpecific { - needs |= internal.NeedsSpecific - } - if !settings.AllowUnused { - needs |= internal.NeedsUnused - } - - lnt, err := internal.NewLinter(needs, settings.AllowNoExplanation) - if err != nil { - return nil, err - } - - nodes := make([]ast.Node, 0, len(pass.Files)) - for _, n := range pass.Files { - nodes = append(nodes, n) - } - - lintIssues, err := lnt.Run(pass.Fset, nodes...) - if err != nil { - return nil, fmt.Errorf("linter failed to run: %w", err) - } - - var issues []goanalysis.Issue - - for _, i := range lintIssues { - expectNoLint := false - var expectedNolintLinter string - if ii, ok := i.(internal.UnusedCandidate); ok { - expectedNolintLinter = ii.ExpectedLinter - expectNoLint = true - } - - issue := &result.Issue{ - FromLinter: LinterName, - Text: i.Details(), - Pos: i.Position(), - ExpectNoLint: expectNoLint, - ExpectedNoLintLinter: expectedNolintLinter, - Replacement: i.Replacement(), - } - - issues = append(issues, goanalysis.NewIssue(issue, pass)) - } - - return issues, nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint/perfsprint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint/perfsprint.go index a4ead1914..c34f7e4c6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint/perfsprint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint/perfsprint.go @@ -16,11 +16,19 @@ func New(settings *config.PerfSprintSettings) *goanalysis.Linter { } if settings != nil { + cfg[a.Name]["integer-format"] = settings.IntegerFormat cfg[a.Name]["int-conversion"] = settings.IntConversion + + cfg[a.Name]["error-format"] = settings.ErrorFormat cfg[a.Name]["err-error"] = settings.ErrError cfg[a.Name]["errorf"] = settings.ErrorF + + cfg[a.Name]["string-format"] = settings.StringFormat cfg[a.Name]["sprintf1"] = settings.SprintF1 cfg[a.Name]["strconcat"] = settings.StrConcat + + cfg[a.Name]["bool-format"] = settings.BoolFormat + cfg[a.Name]["hex-format"] = settings.HexFormat } return goanalysis.NewLinter( diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go index ce7ff9d59..17e86c98e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go @@ -2,7 +2,6 @@ package prealloc import ( "fmt" - "sync" 
"github.com/alexkohler/prealloc/pkg" "golang.org/x/tools/go/analysis" @@ -10,29 +9,16 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "prealloc" func New(settings *config.PreallocSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runPreAlloc(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() + runPreAlloc(pass, settings) return nil, nil }, @@ -43,23 +29,16 @@ func New(settings *config.PreallocSettings) *goanalysis.Linter { "Finds slice declarations that could potentially be pre-allocated", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue { - var issues []goanalysis.Issue - +func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) { hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops) for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(hint.Pos), - Text: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos, + Message: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), + }) } - - return issues } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go index 302ce67b8..423e4ca9e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go @@ -1,21 +1,14 @@ package protogetter import ( - "sync" - "github.com/ghostiam/protogetter" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var cfg protogetter.Config if settings != nil { cfg = protogetter.Config{ @@ -25,50 +18,13 @@ func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend, } } - cfg.Mode = protogetter.GolangciLintMode a := protogetter.NewAnalyzer(&cfg) - a.Run = func(pass *analysis.Pass) (any, error) { - pgIssues, err := protogetter.Run(pass, &cfg) - if err != nil { - return nil, err - } - - issues := make([]goanalysis.Issue, len(pgIssues)) - for i, issue := range pgIssues { - report := &result.Issue{ - FromLinter: a.Name, - Pos: issue.Pos, - Text: issue.Message, - Replacement: &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: issue.InlineFix.StartCol, - Length: 
issue.InlineFix.Length, - NewString: issue.InlineFix.NewString, - }, - }, - } - - issues[i] = goanalysis.NewIssue(report, pass) - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go index 8b030f15d..3af4885b4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go @@ -4,11 +4,19 @@ import ( "github.com/raeperd/recvcheck" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New() *goanalysis.Linter { - a := recvcheck.Analyzer +func New(settings *config.RecvcheckSettings) *goanalysis.Linter { + var cfg recvcheck.Settings + + if settings != nil { + cfg.DisableBuiltin = settings.DisableBuiltin + cfg.Exclusions = settings.Exclusions + } + + a := recvcheck.NewAnalyzer(cfg) return goanalysis.NewLinter( a.Name, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go index 056a258e0..460281287 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go @@ -2,11 +2,14 @@ package revive import ( "bytes" + "cmp" "encoding/json" "fmt" "go/token" "os" "reflect" + "slices" + "strings" "sync" "github.com/BurntSushi/toml" @@ -26,7 +29,10 @@ import ( const linterName = "revive" -var debugf = logutils.Debug(logutils.DebugKeyRevive) +var ( + debugf = logutils.Debug(logutils.DebugKeyRevive) + isDebug = logutils.HaveDebugTag(logutils.DebugKeyRevive) +) // jsonObject defines a JSON object of a failure type jsonObject struct { @@ -90,6 +96,8 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) { return nil, err } + displayRules(conf) + conf.GoVersion, err = hcversion.NewVersion(settings.Go) if err != nil { return nil, err @@ -114,7 +122,7 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) { } func (w *wrapper) run(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { - packages := [][]string{internal.GetFileNames(pass)} + packages := [][]string{internal.GetGoFileNames(pass)} failures, err := w.revive.Lint(packages, w.lintingRules, *w.conf) if err != nil { @@ -164,7 +172,7 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { lineRangeTo = object.Position.Start.Line } - return goanalysis.NewIssue(&result.Issue{ + issue := &result.Issue{ Severity: string(object.Severity), Text: fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure), Pos: token.Position{ @@ -178,14 +186,31 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { To: lineRangeTo, }, FromLinter: linterName, - }, pass) + } + + if object.ReplacementLine != "" { + f := pass.Fset.File(token.Pos(object.Position.Start.Offset)) + + // Skip cgo files because the positions are wrong. 
+ if object.GetFilename() == f.Name() { + issue.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: f.LineStart(object.Position.Start.Line), + End: goanalysis.EndOfLinePos(f, object.Position.End.Line), + NewText: []byte(object.ReplacementLine), + }}, + }} + } + } + + return goanalysis.NewIssue(issue, pass) } // This function mimics the GetConfig function of revive. // This allows to get default values and right types. // https://github.com/golangci/golangci-lint/issues/1745 -// https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L220 -// https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L172-L178 +// https://github.com/mgechev/revive/blob/v1.6.0/config/config.go#L230 +// https://github.com/mgechev/revive/blob/v1.6.0/config/config.go#L182-L188 func getConfig(cfg *config.ReviveSettings) (*lint.Config, error) { conf := defaultConfig() @@ -218,8 +243,6 @@ func getConfig(cfg *config.ReviveSettings) (*lint.Config, error) { conf.Rules[k] = r } - debugf("revive configuration: %#v", conf) - return conf, nil } @@ -284,7 +307,7 @@ func safeTomlSlice(r []any) []any { } // This element is not exported by revive, so we need copy the code. -// Extracted from https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L16 +// Extracted from https://github.com/mgechev/revive/blob/v1.6.0/config/config.go#L16 var defaultRules = []lint.Rule{ &rule.VarDeclarationsRule{}, &rule.PackageCommentsRule{}, @@ -370,6 +393,8 @@ var allRules = append([]lint.Rule{ &rule.CommentsDensityRule{}, &rule.FileLengthLimitRule{}, &rule.FilenameFormatRule{}, + &rule.RedundantBuildTagRule{}, + &rule.UseErrorsNewRule{}, }, defaultRules...) const defaultConfidence = 0.8 @@ -379,12 +404,8 @@ const defaultConfidence = 0.8 func normalizeConfig(cfg *lint.Config) { // NOTE(ldez): this custom section for golangci-lint should be kept. 
// --- - if cfg.Confidence == 0 { - cfg.Confidence = defaultConfidence - } - if cfg.Severity == "" { - cfg.Severity = lint.SeverityWarning - } + cfg.Confidence = cmp.Or(cfg.Confidence, defaultConfidence) + cfg.Severity = cmp.Or(cfg.Severity, lint.SeverityWarning) // --- if len(cfg.Rules) == 0 { @@ -433,3 +454,36 @@ func defaultConfig() *lint.Config { } return &defaultConfig } + +func displayRules(conf *lint.Config) { + if !isDebug { + return + } + + var enabledRules []string + for k, r := range conf.Rules { + if !r.Disabled { + enabledRules = append(enabledRules, k) + } + } + + slices.Sort(enabledRules) + + debugf("All available rules (%d): %s.", len(allRules), strings.Join(extractRulesName(allRules), ", ")) + debugf("Default rules (%d): %s.", len(allRules), strings.Join(extractRulesName(allRules), ", ")) + debugf("Enabled by config rules (%d): %s.", len(enabledRules), strings.Join(enabledRules, ", ")) + + debugf("revive configuration: %#v", conf) +} + +func extractRulesName(rules []lint.Rule) []string { + var names []string + + for _, r := range rules { + names = append(names, r.Name()) + } + + slices.Sort(names) + + return names +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck/spancheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck/spancheck.go index a800a1705..efdc1d0bf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck/spancheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck/spancheck.go @@ -12,16 +12,16 @@ func New(settings *config.SpancheckSettings) *goanalysis.Linter { cfg := spancheck.NewDefaultConfig() if settings != nil { - if settings.Checks != nil { + if len(settings.Checks) > 0 { cfg.EnabledChecks = settings.Checks } - if settings.IgnoreCheckSignatures != nil { + if len(settings.IgnoreCheckSignatures) > 0 { cfg.IgnoreChecksSignaturesSlice = settings.IgnoreCheckSignatures } - if settings.ExtraStartSpanSignatures != nil { - cfg.StartSpanMatchersSlice = settings.ExtraStartSpanSignatures + if len(settings.ExtraStartSpanSignatures) > 0 { + cfg.StartSpanMatchersSlice = append(cfg.StartSpanMatchersSlice, settings.ExtraStartSpanSignatures...) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go index f438c51b5..7c8a0c8b0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go @@ -1,22 +1,15 @@ package tagalign import ( - "sync" - "github.com/4meepo/tagalign" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New(settings *config.TagAlignSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)} + var options []tagalign.Option if settings != nil { options = append(options, tagalign.WithAlign(settings.Align)) @@ -32,44 +25,11 @@ func New(settings *config.TagAlignSettings) *goanalysis.Linter { } analyzer := tagalign.NewAnalyzer(options...) - analyzer.Run = func(pass *analysis.Pass) (any, error) { - taIssues := tagalign.Run(pass, options...) 
- - issues := make([]goanalysis.Issue, len(taIssues)) - for i, issue := range taIssues { - report := &result.Issue{ - FromLinter: analyzer.Name, - Pos: issue.Pos, - Text: issue.Message, - Replacement: &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: issue.InlineFix.StartCol, - Length: issue.InlineFix.Length, - NewString: issue.InlineFix.NewString, - }, - }, - } - - issues[i] = goanalysis.NewIssue(report, pass) - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - } return goanalysis.NewLinter( analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go index d1674c3e9..08215c3a5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go @@ -10,10 +10,12 @@ import ( func New(settings *config.TagliatelleSettings) *goanalysis.Linter { cfg := tagliatelle.Config{ - Rules: map[string]string{ - "json": "camel", - "yaml": "camel", - "header": "header", + Base: tagliatelle.Base{ + Rules: map[string]string{ + "json": "camel", + "yaml": "camel", + "header": "header", + }, }, } @@ -21,7 +23,23 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { for k, v := range settings.Case.Rules { cfg.Rules[k] = v } + + cfg.ExtendedRules = toExtendedRules(settings.Case.ExtendedRules) cfg.UseFieldName = settings.Case.UseFieldName + cfg.IgnoredFields = settings.Case.IgnoredFields + + for _, override := range settings.Case.Overrides { + cfg.Overrides = append(cfg.Overrides, tagliatelle.Overrides{ + Base: tagliatelle.Base{ + Rules: override.Rules, + ExtendedRules: toExtendedRules(override.ExtendedRules), + UseFieldName: override.UseFieldName, + IgnoredFields: override.IgnoredFields, + Ignore: override.Ignore, + }, + Package: override.Package, + }) + } } a := tagliatelle.New(cfg) @@ -31,5 +49,19 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, - ).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func toExtendedRules(src map[string]config.TagliatelleExtendedRule) map[string]tagliatelle.ExtendedRule { + result := make(map[string]tagliatelle.ExtendedRule, len(src)) + + for k, v := range src { + result[k] = tagliatelle.ExtendedRule{ + Case: v.Case, + ExtraInitialisms: v.ExtraInitialisms, + InitialismOverrides: v.InitialismOverrides, + } + } + + return result } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go index 632152712..f617da553 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go @@ -10,19 +10,19 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.TestpackageSettings) *goanalysis.Linter { +func New(settings *config.TestpackageSettings) *goanalysis.Linter { a := testpackage.NewAnalyzer() - var settings map[string]map[string]any - 
if cfg != nil { - settings = map[string]map[string]any{ + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ a.Name: { - testpackage.SkipRegexpFlagName: cfg.SkipRegexp, - testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","), + testpackage.SkipRegexpFlagName: settings.SkipRegexp, + testpackage.AllowPackagesFlagName: strings.Join(settings.AllowPackages, ","), }, } } - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go index cc6ea755c..04503b9ce 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go @@ -1,10 +1,11 @@ package thelper import ( + "maps" + "slices" "strings" "github.com/kulti/thelper/pkg/analyzer" - "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -12,7 +13,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) -func New(cfg *config.ThelperSettings) *goanalysis.Linter { +func New(settings *config.ThelperSettings) *goanalysis.Linter { a := analyzer.NewAnalyzer() opts := map[string]struct{}{ @@ -33,20 +34,20 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter { "tb_first": {}, } - if cfg != nil { - applyTHelperOptions(cfg.Test, "t_", opts) - applyTHelperOptions(cfg.Fuzz, "f_", opts) - applyTHelperOptions(cfg.Benchmark, "b_", opts) - applyTHelperOptions(cfg.TB, "tb_", opts) + if settings != nil { + applyTHelperOptions(settings.Test, "t_", opts) + applyTHelperOptions(settings.Fuzz, "f_", opts) + applyTHelperOptions(settings.Benchmark, "b_", opts) + applyTHelperOptions(settings.TB, "tb_", opts) } if len(opts) == 0 { internal.LinterLogger.Fatalf("thelper: at least one option must be enabled") } - args := maps.Keys(opts) + args := slices.Collect(maps.Keys(opts)) - cfgMap := map[string]map[string]any{ + cfg := map[string]map[string]any{ a.Name: { "checks": strings.Join(args, ","), }, @@ -56,7 +57,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go index 0fe184736..04c9a223e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go @@ -1,8 +1,6 @@ package unparam import ( - "sync" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/packages" @@ -11,33 +9,21 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "unparam" func New(settings *config.UnparamSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: func(pass *analysis.Pass) (any, error) { - 
issues, err := runUnparam(pass, settings) + err := runUnparam(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -51,12 +37,10 @@ func New(settings *config.UnparamSettings) *goanalysis.Linter { if settings.Algo != "cha" { lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) { +func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error { ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) ssaPkg := ssa.Pkg @@ -74,17 +58,15 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanal unparamIssues, err := c.Check() if err != nil { - return nil, err + return err } - var issues []goanalysis.Issue for _, i := range unparamIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(i.Pos()), - Text: i.Message(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: i.Pos(), + Message: i.Message(), + }) } - return issues, nil + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go index 050e47f24..00f7d9742 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go @@ -8,24 +8,24 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { +func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter { a := analyzer.New() - cfgMap := make(map[string]map[string]any) - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - analyzer.ConstantKindFlag: cfg.ConstantKind, - analyzer.CryptoHashFlag: cfg.CryptoHash, - analyzer.HTTPMethodFlag: cfg.HTTPMethod, - analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode, - analyzer.OSDevNullFlag: cfg.OSDevNull, - analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath, - analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel, - analyzer.SyslogPriorityFlag: cfg.SyslogPriority, - analyzer.TimeLayoutFlag: cfg.TimeLayout, - analyzer.TimeMonthFlag: cfg.TimeMonth, - analyzer.TimeWeekdayFlag: cfg.TimeWeekday, - analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme, + cfg := make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + analyzer.ConstantKindFlag: settings.ConstantKind, + analyzer.CryptoHashFlag: settings.CryptoHash, + analyzer.HTTPMethodFlag: settings.HTTPMethod, + analyzer.HTTPStatusCodeFlag: settings.HTTPStatusCode, + analyzer.OSDevNullFlag: settings.OSDevNull != nil && *settings.OSDevNull, + analyzer.RPCDefaultPathFlag: settings.DefaultRPCPath, + analyzer.SQLIsolationLevelFlag: settings.SQLIsolationLevel, + analyzer.SyslogPriorityFlag: settings.SyslogPriority != nil && *settings.SyslogPriority, + analyzer.TimeLayoutFlag: settings.TimeLayout, + analyzer.TimeMonthFlag: settings.TimeMonth, + analyzer.TimeWeekdayFlag: settings.TimeWeekday, + analyzer.TLSSignatureSchemeFlag: settings.TLSSignatureScheme, } } @@ -33,6 +33,6 @@ func New(cfg *config.UseStdlibVarsSettings) 
*goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go new file mode 100644 index 000000000..a21742fbd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go @@ -0,0 +1,33 @@ +package usetesting + +import ( + "github.com/ldez/usetesting" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" +) + +func New(settings *config.UseTestingSettings) *goanalysis.Linter { + a := usetesting.NewAnalyzer() + + cfg := make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + "contextbackground": settings.ContextBackground, + "contexttodo": settings.ContextTodo, + "oschdir": settings.OSChdir, + "osmkdirtemp": settings.OSMkdirTemp, + "ossetenv": settings.OSSetenv, + "ostempdir": settings.OSTempDir, + "oscreatetemp": settings.OSCreateTemp, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go index 721bfada1..d45969efc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go @@ -1,28 +1,17 @@ package whitespace import ( - "fmt" - "sync" - "github.com/ultraware/whitespace" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const linterName = "whitespace" - func New(settings *config.WhitespaceSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var wsSettings whitespace.Settings if settings != nil { wsSettings = whitespace.Settings{ - Mode: whitespace.RunningModeGolangCI, MultiIf: settings.MultiIf, MultiFunc: settings.MultiFunc, } @@ -35,68 +24,5 @@ func New(settings *config.WhitespaceSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(_ *linter.Context) { - a.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runWhitespace(pass, wsSettings) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) { - lintIssues := whitespace.Run(pass, &wsSettings) - - issues := make([]goanalysis.Issue, len(lintIssues)) - for i, issue := range lintIssues { - report := &result.Issue{ - FromLinter: linterName, - Pos: pass.Fset.PositionFor(issue.Diagnostic, false), - Text: issue.Message, - } - - switch issue.MessageType { - case whitespace.MessageTypeRemove: - if len(issue.LineNumbers) == 0 { - continue - } - - report.LineRange = &result.Range{ - From: issue.LineNumbers[0], - To: issue.LineNumbers[len(issue.LineNumbers)-1], - } - - report.Replacement = &result.Replacement{NeedOnlyDelete: true} - - case whitespace.MessageTypeAdd: - report.Pos = pass.Fset.PositionFor(issue.FixStart, false) - report.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: 0, - Length: 1, - NewString: "\n\t", - }, - } - - default: - return nil, fmt.Errorf("unknown message type: %v", issue.MessageType) - } - - issues[i] = goanalysis.NewIssue(report, pass) - } - - return issues, nil + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go index 96ec2eeae..b2f5ec742 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go @@ -11,6 +11,8 @@ import ( func New(settings *config.WrapcheckSettings) *goanalysis.Linter { cfg := wrapcheck.NewDefaultConfig() if settings != nil { + cfg.ExtraIgnoreSigs = settings.ExtraIgnoreSigs + if len(settings.IgnoreSigs) != 0 { cfg.IgnoreSigs = settings.IgnoreSigs } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go index 7b748d8e9..beb71f722 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go @@ -2,53 +2,39 @@ package goutil import ( "context" - "encoding/json" "fmt" "os" - "os/exec" - "strings" "time" + "github.com/ldez/grignotin/goenv" + "github.com/golangci/golangci-lint/pkg/logutils" ) type EnvKey string -const ( - EnvGoCache EnvKey = "GOCACHE" - EnvGoRoot EnvKey = "GOROOT" -) - type Env struct { - vars map[string]string - log logutils.Log - debugf logutils.DebugFunc + vars map[string]string + log logutils.Log } func NewEnv(log logutils.Log) *Env { return &Env{ - vars: map[string]string{}, - log: log, - debugf: logutils.Debug(logutils.DebugKeyEnv), + vars: map[string]string{}, + log: log, } } func (e Env) Discover(ctx context.Context) error { startedAt := time.Now() - //nolint:gosec // Everything is static here. 
- cmd := exec.CommandContext(ctx, "go", "env", "-json", string(EnvGoCache), string(EnvGoRoot)) - - out, err := cmd.Output() + var err error + e.vars, err = goenv.Get(ctx, goenv.GOCACHE, goenv.GOROOT) if err != nil { - return fmt.Errorf("failed to run '%s': %w", strings.Join(cmd.Args, " "), err) - } - - if err = json.Unmarshal(out, &e.vars); err != nil { - return fmt.Errorf("failed to parse '%s' json: %w", strings.Join(cmd.Args, " "), err) + return fmt.Errorf("%w", err) } - e.debugf("Read go env for %s: %#v", time.Since(startedAt), e.vars) + e.log.Infof("Read go env for %s: %#v", time.Since(startedAt), e.vars) return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go index d04a11b81..2ac5a2d2c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go @@ -7,7 +7,6 @@ import ( "github.com/golangci/golangci-lint/internal/cache" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/exitcodes" - "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/goanalysis/load" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" @@ -18,19 +17,17 @@ type ContextBuilder struct { pkgLoader *PackageLoader - fileCache *fsutils.FileCache - pkgCache *cache.Cache + pkgCache *cache.Cache loadGuard *load.Guard } func NewContextBuilder(cfg *config.Config, pkgLoader *PackageLoader, - fileCache *fsutils.FileCache, pkgCache *cache.Cache, loadGuard *load.Guard, + pkgCache *cache.Cache, loadGuard *load.Guard, ) *ContextBuilder { return &ContextBuilder{ cfg: cfg, pkgLoader: pkgLoader, - fileCache: fileCache, pkgCache: pkgCache, loadGuard: loadGuard, } @@ -55,7 +52,6 @@ func (cl *ContextBuilder) Build(ctx context.Context, log logutils.Log, linters [ Cfg: cl.cfg, Log: log, - FileCache: cl.fileCache, PkgCache: cl.pkgCache, LoadGuard: cl.loadGuard, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go index 6d6d4b17e..20bed6a71 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -164,12 +164,16 @@ func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Con } func IsGoLowerThanGo122() func(cfg *config.Config) error { + return isGoLowerThanGo("1.22") +} + +func isGoLowerThanGo(v string) func(cfg *config.Config) error { return func(cfg *config.Config) error { - if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") { + if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, v) { return nil } - return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go) + return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go index 9f29b5c4c..6986b6231 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go @@ -7,7 +7,6 @@ import ( "github.com/golangci/golangci-lint/internal/cache" "github.com/golangci/golangci-lint/pkg/config" - 
"github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/goanalysis/load" "github.com/golangci/golangci-lint/pkg/logutils" ) @@ -20,9 +19,8 @@ type Context struct { // version for each of packages OriginalPackages []*packages.Package - Cfg *config.Config - FileCache *fsutils.FileCache - Log logutils.Log + Cfg *config.Config + Log logutils.Log PkgCache *cache.Cache LoadGuard *load.Guard diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go index d2a2dc3d0..ddeb99e14 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go @@ -25,7 +25,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/errorlint" "github.com/golangci/golangci-lint/pkg/golinters/exhaustive" "github.com/golangci/golangci-lint/pkg/golinters/exhaustruct" - "github.com/golangci/golangci-lint/pkg/golinters/exportloopref" + "github.com/golangci/golangci-lint/pkg/golinters/exptostd" "github.com/golangci/golangci-lint/pkg/golinters/fatcontext" "github.com/golangci/golangci-lint/pkg/golinters/forbidigo" "github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert" @@ -72,6 +72,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/nakedret" "github.com/golangci/golangci-lint/pkg/golinters/nestif" "github.com/golangci/golangci-lint/pkg/golinters/nilerr" + "github.com/golangci/golangci-lint/pkg/golinters/nilnesserr" "github.com/golangci/golangci-lint/pkg/golinters/nilnil" "github.com/golangci/golangci-lint/pkg/golinters/nlreturn" "github.com/golangci/golangci-lint/pkg/golinters/noctx" @@ -105,6 +106,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/unparam" "github.com/golangci/golangci-lint/pkg/golinters/unused" "github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars" + "github.com/golangci/golangci-lint/pkg/golinters/usetesting" "github.com/golangci/golangci-lint/pkg/golinters/varnamelen" "github.com/golangci/golangci-lint/pkg/golinters/wastedassign" "github.com/golangci/golangci-lint/pkg/golinters/whitespace" @@ -160,7 +162,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). - WithURL("https://github.com/lasiar/canonicalHeader"), + WithAutoFix(). + WithURL("https://github.com/lasiar/canonicalheader"), linter.NewConfig(containedctx.New()). WithSince("v1.44.0"). @@ -177,28 +180,27 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(copyloopvar.New(&cfg.LintersSettings.CopyLoopVar)). WithSince("v1.57.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/karamaru-alpha/copyloopvar"). WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), linter.NewConfig(cyclop.New(&cfg.LintersSettings.Cyclop)). WithSince("v1.37.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetComplexity). WithURL("https://github.com/bkielbasa/cyclop"), linter.NewConfig(decorder.New(&cfg.LintersSettings.Decorder)). WithSince("v1.44.0"). - WithPresets(linter.PresetFormatting, linter.PresetStyle). + WithPresets(linter.PresetStyle). WithURL("https://gitlab.com/bosi/decorder"), linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). 
- WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). + WithURL("https://github.com/remyoudompheng/go-misc/tree/HEAD/deadcode"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), - linter.NewConfig(depguard.New(&cfg.LintersSettings.Depguard)). + linter.NewConfig(depguard.New(&cfg.LintersSettings.Depguard, cfg.GetBasePath())). WithSince("v1.4.0"). WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). WithURL("https://github.com/OpenPeeDeeP/depguard"), @@ -216,6 +218,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)). WithSince("v1.50.0"). WithPresets(linter.PresetComment). + WithAutoFix(). WithURL("https://github.com/Abirdcfly/dupword"), linter.NewConfig(durationcheck.New()). @@ -247,12 +250,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.32.0"). WithPresets(linter.PresetBugs, linter.PresetError). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/polyfloyd/go-errorlint"), linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)). WithSince("v1.46.0"). WithPresets(linter.PresetSQL). - WithLoadForGoAnalysis(). WithURL("https://github.com/1uf3/execinquery"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""), @@ -265,7 +268,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg, linter.DeprecationError)). WithSince("v1.32.0"). WithPresets(linter.PresetStyle, linter.PresetTest). - WithLoadForGoAnalysis(). WithURL("https://github.com/mbilski/exhaustivestruct"). DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"), @@ -275,12 +277,19 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithURL("https://github.com/GaijinEntertainment/go-exhaustruct"), - linter.NewConfig(exportloopref.New()). + linter.NewConfig(linter.NewNoopDeprecated("exportloopref", cfg, linter.DeprecationError)). WithSince("v1.28.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). WithURL("https://github.com/kyoh86/exportloopref"). - DeprecatedWarning("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"), + DeprecatedError("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"), + + linter.NewConfig(exptostd.New()). + WithSince("v1.63.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithAutoFix(). + WithURL("https://github.com/ldez/exptostd"), linter.NewConfig(forbidigo.New(&cfg.LintersSettings.Forbidigo)). WithSince("v1.34.0"). @@ -295,12 +304,14 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(forcetypeassert.New()). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). WithURL("https://github.com/gostaticanalysis/forcetypeassert"), - linter.NewConfig(fatcontext.New()). + linter.NewConfig(fatcontext.New(&cfg.LintersSettings.Fatcontext)). WithSince("v1.58.0"). WithPresets(linter.PresetPerformance). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/Crocmagnon/fatcontext"), linter.NewConfig(funlen.New(&cfg.LintersSettings.Funlen)). @@ -318,6 +329,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.51.0"). 
WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/nunnatsa/ginkgolinter"), linter.NewConfig(gocheckcompilerdirectives.New()). @@ -379,6 +391,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). WithAlternativeNames("goerr113"). + WithAutoFix(). WithURL("https://github.com/Djarvur/go-err113"), linter.NewConfig(gofmt.New(&cfg.LintersSettings.Gofmt)). @@ -393,7 +406,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithAutoFix(). WithURL("https://github.com/mvdan/gofumpt"), - linter.NewConfig(goheader.New(&cfg.LintersSettings.Goheader)). + linter.NewConfig(goheader.New(&cfg.LintersSettings.Goheader, cfg.GetBasePath())). WithSince("v1.28.0"). WithPresets(linter.PresetStyle). WithAutoFix(). @@ -407,7 +420,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("golint", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), @@ -451,7 +463,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithAlternativeNames(megacheckName). - WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), + WithAutoFix(). + WithURL("https://github.com/dominikh/go-tools/tree/HEAD/simple"), linter.NewConfig(gosmopolitan.New(&cfg.LintersSettings.Gosmopolitan)). WithSince("v1.53.0"). @@ -464,6 +477,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.0.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAutoFix(). WithAlternativeNames("vet", "vetshadow"). WithURL("https://pkg.go.dev/cmd/vet"), @@ -482,12 +496,14 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.62.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/uudashr/iface"), linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/julz/importas"), linter.NewConfig(inamedparam.New(&cfg.LintersSettings.Inamedparam)). @@ -508,7 +524,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/mvdan/interfacer"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", ""), @@ -517,6 +532,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.57.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/ckaznocha/intrange"). WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), @@ -550,7 +566,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). WithURL("https://github.com/mdempsky/maligned"). 
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), @@ -577,6 +592,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nakedret.New(&cfg.LintersSettings.Nakedret)). WithSince("v1.19.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/alexkohler/nakedret"), linter.NewConfig(nestif.New(&cfg.LintersSettings.Nestif)). @@ -590,6 +606,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetBugs). WithURL("https://github.com/gostaticanalysis/nilerr"), + linter.NewConfig(nilnesserr.New()). + WithSince("v1.63.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/alingse/nilnesserr"), + linter.NewConfig(nilnil.New(&cfg.LintersSettings.NilNil)). WithSince("v1.43.0"). WithPresets(linter.PresetStyle). @@ -599,6 +621,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nlreturn.New(&cfg.LintersSettings.Nlreturn)). WithSince("v1.30.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/ssgreg/nlreturn"), linter.NewConfig(noctx.New()). @@ -634,6 +657,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). + WithAutoFix(). WithURL("https://github.com/catenacyber/perfsprint"), linter.NewConfig(prealloc.New(&cfg.LintersSettings.Prealloc)). @@ -664,7 +688,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithURL("https://github.com/curioswitch/go-reassign"), - linter.NewConfig(recvcheck.New()). + linter.NewConfig(recvcheck.New(&cfg.LintersSettings.Recvcheck)). WithSince("v1.62.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). @@ -674,6 +698,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.37.0"). WithPresets(linter.PresetStyle, linter.PresetMetaLinter). ConsiderSlow(). + WithAutoFix(). WithURL("https://github.com/mgechev/revive"), linter.NewConfig(rowserrcheck.New(&cfg.LintersSettings.RowsErrCheck)). @@ -685,7 +710,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(sloglint.New(&cfg.LintersSettings.SlogLint)). WithSince("v1.55.0"). WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithPresets(linter.PresetStyle). WithURL("https://github.com/go-simpler/sloglint"), linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg, linter.DeprecationError)). @@ -712,11 +737,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). WithAlternativeNames(megacheckName). + WithAutoFix(). WithURL("https://staticcheck.dev/"), linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -725,24 +750,27 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.20.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). - WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), + WithAutoFix(). 
+ WithURL("https://github.com/dominikh/go-tools/tree/HEAD/stylecheck"), linter.NewConfig(tagalign.New(&cfg.LintersSettings.TagAlign)). WithSince("v1.53.0"). - WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithPresets(linter.PresetStyle). WithAutoFix(). WithURL("https://github.com/4meepo/tagalign"), linter.NewConfig(tagliatelle.New(&cfg.LintersSettings.Tagliatelle)). WithSince("v1.40.0"). WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). WithURL("https://github.com/ldez/tagliatelle"), linter.NewConfig(tenv.New(&cfg.LintersSettings.Tenv)). WithSince("v1.43.0"). WithPresets(linter.PresetTest). WithLoadForGoAnalysis(). - WithURL("https://github.com/sivchari/tenv"), + WithURL("https://github.com/sivchari/tenv"). + DeprecatedWarning("Duplicate feature in another linter.", "v1.64.0", "usetesting"), linter.NewConfig(testableexamples.New()). WithSince("v1.50.0"). @@ -753,6 +781,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithPresets(linter.PresetTest, linter.PresetBugs). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/Antonboom/testifylint"), linter.NewConfig(testpackage.New(&cfg.LintersSettings.Testpackage)). @@ -797,16 +826,23 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithAlternativeNames(megacheckName). ConsiderSlow(). WithChangeTypes(). - WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), + WithURL("https://github.com/dominikh/go-tools/tree/HEAD/unused"), linter.NewConfig(usestdlibvars.New(&cfg.LintersSettings.UseStdlibVars)). WithSince("v1.48.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/sashamelentyev/usestdlibvars"), + linter.NewConfig(usetesting.New(&cfg.LintersSettings.UseTesting)). + WithSince("v1.63.0"). + WithPresets(linter.PresetTest). + WithLoadForGoAnalysis(). + WithAutoFix(). + WithURL("https://github.com/ldez/usetesting"), + linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -838,6 +874,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(wsl.New(&cfg.LintersSettings.WSL)). WithSince("v1.20.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/bombsimon/wsl"), linter.NewConfig(zerologlint.New()). @@ -851,6 +888,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.26.0"). WithPresets(linter.PresetStyle). WithAutoFix(). 
- WithURL("https://github.com/golangci/golangci-lint/tree/master/pkg/golinters/nolintlint/internal"), + WithURL("https://github.com/golangci/golangci-lint/tree/HEAD/pkg/golinters/nolintlint/internal"), }, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go index 88f3e2ae3..e9f6931f3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_plugin_go.go @@ -1,6 +1,7 @@ package lintersdb import ( + "context" "errors" "fmt" "path/filepath" @@ -9,6 +10,7 @@ import ( "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" @@ -81,8 +83,21 @@ func (b *PluginGoBuilder) loadConfig(cfg *config.Config, name string, settings * // or the linter does not implement the AnalyzerPlugin interface. func (b *PluginGoBuilder) getAnalyzerPlugin(cfg *config.Config, path string, settings any) ([]*analysis.Analyzer, error) { if !filepath.IsAbs(path) { + // Hack for compatibility: + // the previous default (v1) was `cfg` but `fsutils.GetBasePath` defaults on `wd`. + // TODO(ldez): should be removed in v2. + relativePathMode := cfg.Run.RelativePathMode + if relativePathMode == "" { + relativePathMode = fsutils.RelativePathModeCfg + } + + basePath, err := fsutils.GetBasePath(context.Background(), relativePathMode, cfg.GetConfigDir()) + if err != nil { + return nil, fmt.Errorf("get base path: %w", err) + } + // resolve non-absolute paths relative to config file's directory - path = filepath.Join(cfg.GetConfigDir(), path) + path = filepath.Join(basePath, path) } plug, err := plugin.Open(path) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go index 75ab53d7c..4fe57a3b4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -2,11 +2,11 @@ package lintersdb import ( "fmt" + "maps" "os" "slices" "sort" - - "golang.org/x/exp/maps" + "strings" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" @@ -109,24 +109,25 @@ func (m *Manager) GetOptimizedLinters() ([]*linter.Config, error) { m.combineGoAnalysisLinters(resultLintersSet) - resultLinters := maps.Values(resultLintersSet) - // Make order of execution of linters (go/analysis metalinter and unused) stable. 
- sort.Slice(resultLinters, func(i, j int) bool { - a, b := resultLinters[i], resultLinters[j] - + resultLinters := slices.SortedFunc(maps.Values(resultLintersSet), func(a *linter.Config, b *linter.Config) int { if b.Name() == linter.LastLinter { - return true + return -1 } if a.Name() == linter.LastLinter { - return false + return 1 } if a.DoesChangeTypes != b.DoesChangeTypes { - return b.DoesChangeTypes // move type-changing linters to the end to optimize speed + // move type-changing linters to the end to optimize speed + if b.DoesChangeTypes { + return -1 + } + return 1 } - return a.Name() < b.Name() + + return strings.Compare(a.Name(), b.Name()) }) return resultLinters, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go index c314166ca..736498b0b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/package.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/ldez/grignotin/goenv" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/pkg/config" @@ -204,12 +205,13 @@ func (l *PackageLoader) debugPrintLoadedPackages(pkgs []*packages.Package) { func (l *PackageLoader) prepareBuildContext() { // Set GOROOT to have working cross-compilation: cross-compiled binaries // have invalid GOROOT. XXX: can't use runtime.GOROOT(). - goroot := l.goenv.Get(goutil.EnvGoRoot) + goroot := l.goenv.Get(goenv.GOROOT) if goroot == "" { return } - os.Setenv(string(goutil.EnvGoRoot), goroot) + _ = os.Setenv(goenv.GOROOT, goroot) + build.Default.GOROOT = goroot build.Default.BuildTags = l.cfg.Run.BuildTags } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index 2c47c7166..4e6d4692b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -10,6 +10,7 @@ import ( "github.com/golangci/golangci-lint/internal/errorutil" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/goformatters" "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -40,6 +41,16 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti // or process other paths (skip files). files := fsutils.NewFiles(lineCache, cfg.Output.PathPrefix) + pathRelativity, err := processors.NewPathRelativity(log, cfg.GetBasePath()) + if err != nil { + return nil, fmt.Errorf("error creating path relativity processor: %w", err) + } + + exclusionPaths, err := processors.NewExclusionPaths(log, &cfg.Linters.LinterExclusions) + if err != nil { + return nil, err + } + skipFilesProcessor, err := processors.NewSkipFiles(cfg.Issues.ExcludeFiles, cfg.Output.PathPrefix) if err != nil { return nil, err @@ -60,45 +71,59 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti return nil, fmt.Errorf("failed to get enabled linters: %w", err) } + metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters) + if err != nil { + return nil, fmt.Errorf("failed to create meta-formatter: %w", err) + } + return &Runner{ Processors: []processors.Processor{ + // Must be the first processor. + processors.NewPathAbsoluter(log), + processors.NewCgo(goenv), - // Must go after Cgo. 
+ // Must be after Cgo. processors.NewFilenameUnadjuster(lintCtx.Packages, log.Child(logutils.DebugKeyFilenameUnadjuster)), - // Must go after FilenameUnadjuster. + // Must be after FilenameUnadjuster. processors.NewInvalidIssue(log.Child(logutils.DebugKeyInvalidIssue)), - // Must be before diff, nolint and exclude autogenerated processor at least. - processors.NewPathPrettifier(), + // Must be after PathAbsoluter, Cgo, FilenameUnadjuster InvalidIssue. + pathRelativity, + + // Must be after PathRelativity. + exclusionPaths, skipFilesProcessor, - skipDirsProcessor, // must be after path prettifier + skipDirsProcessor, - processors.NewAutogeneratedExclude(cfg.Issues.ExcludeGenerated), + processors.NewGeneratedFileFilter(cfg.Linters.LinterExclusions.Generated), // Must be before exclude because users see already marked output and configure excluding by it. processors.NewIdentifierMarker(), - processors.NewExclude(&cfg.Issues), - processors.NewExcludeRules(log.Child(logutils.DebugKeyExcludeRules), files, &cfg.Issues), - processors.NewNolint(log.Child(logutils.DebugKeyNolint), dbManager, enabledLinters), + processors.NewExclusionRules(log.Child(logutils.DebugKeyExclusionRules), files, + &cfg.Linters.LinterExclusions, &cfg.Issues), + + processors.NewNolintFilter(log.Child(logutils.DebugKeyNolintFilter), dbManager, enabledLinters), - processors.NewUniqByLine(cfg), processors.NewDiff(&cfg.Issues), + + // The fixer still needs to see paths for the issues that are relative to the current directory. + processors.NewFixer(cfg, log, fileCache, metaFormatter), + + // Must be after the Fixer. + processors.NewUniqByLine(cfg.Issues.UniqByLine), processors.NewMaxPerFileFromLinter(cfg), processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child(logutils.DebugKeyMaxSameIssues), cfg), processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child(logutils.DebugKeyMaxFromLinter), cfg), + + // Now we can modify the issues for output. processors.NewSourceCode(lineCache, log.Child(logutils.DebugKeySourceCode)), processors.NewPathShortener(), processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, &cfg.Severity), - - // The fixer still needs to see paths for the issues that are relative to the current directory. - processors.NewFixer(cfg, log, fileCache), - - // Now we can modify the issues for output. - processors.NewPathPrefixer(cfg.Output.PathPrefix), - processors.NewSortResults(cfg), + processors.NewPathPrettifier(log, cfg.Output.PathPrefix), + processors.NewSortResults(&cfg.Output), }, lintCtx: lintCtx, Log: log, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go index 3c27e2557..0454d7927 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go @@ -16,36 +16,53 @@ const EnvTestRun = "GL_TEST_RUN" const envDebug = "GL_DEBUG" const ( - DebugKeyAutogenExclude = "autogen_exclude" // Debugs a filter excluding autogenerated source code. - DebugKeyBinSalt = "bin_salt" - DebugKeyConfigReader = "config_reader" - DebugKeyEmpty = "" - DebugKeyEnabledLinters = "enabled_linters" - DebugKeyEnv = "env" // Debugs `go env` command. 
- DebugKeyExcludeRules = "exclude_rules" - DebugKeyExec = "exec" - DebugKeyFilenameUnadjuster = "filename_unadjuster" - DebugKeyInvalidIssue = "invalid_issue" - DebugKeyForbidigo = "forbidigo" - DebugKeyGoEnv = "goenv" - DebugKeyLinter = "linter" - DebugKeyLintersContext = "linters_context" - DebugKeyLintersDB = "lintersdb" - DebugKeyLintersOutput = "linters_output" - DebugKeyLoader = "loader" // Debugs packages loading (including `go/packages` internal debugging). - DebugKeyMaxFromLinter = "max_from_linter" - DebugKeyMaxSameIssues = "max_same_issues" - DebugKeyPkgCache = "pkgcache" - DebugKeyRunner = "runner" - DebugKeySeverityRules = "severity_rules" - DebugKeySkipDirs = "skip_dirs" - DebugKeySourceCode = "source_code" - DebugKeyStopwatch = "stopwatch" + DebugKeyBinSalt = "bin_salt" + DebugKeyConfigReader = "config_reader" + DebugKeyEmpty = "" + DebugKeyEnabledLinters = "enabled_linters" + DebugKeyExec = "exec" + DebugKeyFormatter = "formatter" + DebugKeyGoEnv = "goenv" + DebugKeyLinter = "linter" + DebugKeyLintersContext = "linters_context" + DebugKeyLintersDB = "lintersdb" + DebugKeyLintersOutput = "linters_output" + DebugKeyLoader = "loader" // Debugs packages loading (including `go/packages` internal debugging). + DebugKeyPkgCache = "pkgcache" + DebugKeyRunner = "runner" + DebugKeyStopwatch = "stopwatch" + DebugKeyTest = "test" +) + +// Printers. +const ( + DebugKeyCheckstylePrinter = "checkstyle_printer" + DebugKeyCodeClimatePrinter = "codeclimate_printer" + DebugKeySarifPrinter = "sarif_printer" DebugKeyTabPrinter = "tab_printer" - DebugKeyTest = "test" + DebugKeyTeamCityPrinter = "teamcity_printer" DebugKeyTextPrinter = "text_printer" ) +// Processors. +const ( + DebugKeyExclusionPaths = "exclusion_paths" + DebugKeyExclusionRules = "exclusion_rules" + DebugKeyFilenameUnadjuster = "filename_unadjuster" + DebugKeyGeneratedFileFilter = "generated_file_filter" // Debugs a filter excluding autogenerated source code. + DebugKeyInvalidIssue = "invalid_issue" + DebugKeyMaxFromLinter = "max_from_linter" + DebugKeyMaxSameIssues = "max_same_issues" + DebugKeyNolintFilter = "nolint_filter" // Debugs a filter excluding issues by `//nolint` comments. + DebugKeyPathAbsoluter = "path_absoluter" + DebugKeyPathPrettifier = "path_prettifier" + DebugKeyPathRelativity = "path_relativity" + DebugKeySeverityRules = "severity_rules" + DebugKeySkipDirs = "skip_dirs" + DebugKeySourceCode = "source_code" +) + +// Analysis. const ( DebugKeyGoAnalysis = "goanalysis" @@ -59,11 +76,12 @@ const ( DebugKeyGoAnalysisFactsInherit = DebugKeyGoAnalysisFacts + "/inherit" ) +// Linters. const ( - DebugKeyGoCritic = "gocritic" // Debugs `go-critic` linter. - DebugKeyGovet = "govet" // Debugs `govet` linter. - DebugKeyNolint = "nolint" // Debugs a filter excluding issues by `//nolint` comments. - DebugKeyRevive = "revive" // Debugs `revive` linter. + DebugKeyForbidigo = "forbidigo" // Debugs `forbidigo` linter. + DebugKeyGoCritic = "gocritic" // Debugs `gocritic` linter. + DebugKeyGovet = "govet" // Debugs `govet` linter. + DebugKeyRevive = "revive" // Debugs `revive` linter. 
) func getEnabledDebugs() map[string]bool { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go index e32eef7f5..c31641d22 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -4,44 +4,40 @@ import ( "encoding/xml" "fmt" "io" - "sort" + "maps" + "slices" + "strings" "github.com/go-xmlfmt/xmlfmt" - "golang.org/x/exp/maps" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) const defaultCheckstyleSeverity = "error" -type checkstyleOutput struct { - XMLName xml.Name `xml:"checkstyle"` - Version string `xml:"version,attr"` - Files []*checkstyleFile `xml:"file"` -} - -type checkstyleFile struct { - Name string `xml:"name,attr"` - Errors []*checkstyleError `xml:"error"` -} - -type checkstyleError struct { - Column int `xml:"column,attr"` - Line int `xml:"line,attr"` - Message string `xml:"message,attr"` - Severity string `xml:"severity,attr"` - Source string `xml:"source,attr"` -} - +// Checkstyle prints issues in the Checkstyle format. +// https://checkstyle.org/config.html type Checkstyle struct { - w io.Writer + log logutils.Log + w io.Writer + sanitizer severitySanitizer } -func NewCheckstyle(w io.Writer) *Checkstyle { - return &Checkstyle{w: w} +func NewCheckstyle(log logutils.Log, w io.Writer) *Checkstyle { + return &Checkstyle{ + log: log.Child(logutils.DebugKeyCheckstylePrinter), + w: w, + sanitizer: severitySanitizer{ + // https://checkstyle.org/config.html#Severity + // https://checkstyle.org/property_types.html#SeverityLevel + allowedSeverities: []string{"ignore", "info", "warning", defaultCheckstyleSeverity}, + defaultSeverity: defaultCheckstyleSeverity, + }, + } } -func (p Checkstyle) Print(issues []result.Issue) error { +func (p *Checkstyle) Print(issues []result.Issue) error { out := checkstyleOutput{ Version: "5.0", } @@ -59,26 +55,24 @@ func (p Checkstyle) Print(issues []result.Issue) error { files[issue.FilePath()] = file } - severity := defaultCheckstyleSeverity - if issue.Severity != "" { - severity = issue.Severity - } - newError := &checkstyleError{ Column: issue.Column(), Line: issue.Line(), Message: issue.Text, Source: issue.FromLinter, - Severity: severity, + Severity: p.sanitizer.Sanitize(issue.Severity), } file.Errors = append(file.Errors, newError) } - out.Files = maps.Values(files) + err := p.sanitizer.Err() + if err != nil { + p.log.Infof("%v", err) + } - sort.Slice(out.Files, func(i, j int) bool { - return out.Files[i].Name < out.Files[j].Name + out.Files = slices.SortedFunc(maps.Values(files), func(a *checkstyleFile, b *checkstyleFile) int { + return strings.Compare(a.Name, b.Name) }) data, err := xml.Marshal(&out) @@ -93,3 +87,22 @@ func (p Checkstyle) Print(issues []result.Issue) error { return nil } + +type checkstyleOutput struct { + XMLName xml.Name `xml:"checkstyle"` + Version string `xml:"version,attr"` + Files []*checkstyleFile `xml:"file"` +} + +type checkstyleFile struct { + Name string `xml:"name,attr"` + Errors []*checkstyleError `xml:"error"` +} + +type checkstyleError struct { + Column int `xml:"column,attr"` + Line int `xml:"line,attr"` + Message string `xml:"message,attr"` + Severity string `xml:"severity,attr"` + Source string `xml:"source,attr"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go 
index b65339682..983dcde36 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -4,56 +4,72 @@ import ( "encoding/json" "io" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) const defaultCodeClimateSeverity = "critical" -// CodeClimateIssue is a subset of the Code Climate spec. -// https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types -// It is just enough to support GitLab CI Code Quality. -// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool -type CodeClimateIssue struct { - Description string `json:"description"` - CheckName string `json:"check_name"` - Severity string `json:"severity,omitempty"` - Fingerprint string `json:"fingerprint"` - Location struct { - Path string `json:"path"` - Lines struct { - Begin int `json:"begin"` - } `json:"lines"` - } `json:"location"` -} - +// CodeClimate prints issues in the Code Climate format. +// https://github.com/codeclimate/platform/blob/HEAD/spec/analyzers/SPEC.md type CodeClimate struct { - w io.Writer + log logutils.Log + w io.Writer + sanitizer severitySanitizer } -func NewCodeClimate(w io.Writer) *CodeClimate { - return &CodeClimate{w: w} +func NewCodeClimate(log logutils.Log, w io.Writer) *CodeClimate { + return &CodeClimate{ + log: log.Child(logutils.DebugKeyCodeClimatePrinter), + w: w, + sanitizer: severitySanitizer{ + // https://github.com/codeclimate/platform/blob/HEAD/spec/analyzers/SPEC.md#data-types + allowedSeverities: []string{"info", "minor", "major", defaultCodeClimateSeverity, "blocker"}, + defaultSeverity: defaultCodeClimateSeverity, + }, + } } -func (p CodeClimate) Print(issues []result.Issue) error { - codeClimateIssues := make([]CodeClimateIssue, 0, len(issues)) +func (p *CodeClimate) Print(issues []result.Issue) error { + ccIssues := make([]codeClimateIssue, 0, len(issues)) for i := range issues { - issue := &issues[i] - - codeClimateIssue := CodeClimateIssue{} - codeClimateIssue.Description = issue.Description() - codeClimateIssue.CheckName = issue.FromLinter - codeClimateIssue.Location.Path = issue.Pos.Filename - codeClimateIssue.Location.Lines.Begin = issue.Pos.Line - codeClimateIssue.Fingerprint = issue.Fingerprint() - codeClimateIssue.Severity = defaultCodeClimateSeverity - - if issue.Severity != "" { - codeClimateIssue.Severity = issue.Severity + issue := issues[i] + + ccIssue := codeClimateIssue{ + Description: issue.Description(), + CheckName: issue.FromLinter, + Severity: p.sanitizer.Sanitize(issue.Severity), + Fingerprint: issue.Fingerprint(), } - codeClimateIssues = append(codeClimateIssues, codeClimateIssue) + ccIssue.Location.Path = issue.Pos.Filename + ccIssue.Location.Lines.Begin = issue.Pos.Line + + ccIssues = append(ccIssues, ccIssue) } - return json.NewEncoder(p.w).Encode(codeClimateIssues) + err := p.sanitizer.Err() + if err != nil { + p.log.Infof("%v", err) + } + + return json.NewEncoder(p.w).Encode(ccIssues) +} + +// codeClimateIssue is a subset of the Code Climate spec. +// https://github.com/codeclimate/platform/blob/HEAD/spec/analyzers/SPEC.md#data-types +// It is just enough to support GitLab CI Code Quality. 
+// https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format +type codeClimateIssue struct { + Description string `json:"description"` + CheckName string `json:"check_name"` + Severity string `json:"severity,omitempty"` + Fingerprint string `json:"fingerprint"` + Location struct { + Path string `json:"path"` + Lines struct { + Begin int `json:"begin"` + } `json:"lines"` + } `json:"location"` } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go index 7dd1e5c62..6fc6bc62a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go @@ -122,6 +122,8 @@ type htmlIssue struct { Code string } +// HTML prints issues in an HTML page. +// It uses the Cloudflare CDN (cdnjs) and React. type HTML struct { w io.Writer } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go index 28509cac4..8fc94649f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go @@ -8,12 +8,13 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +// JSON prints issues in a JSON representation. type JSON struct { - rd *report.Data // TODO(ldez) should be drop in v2. Only use by JSON reporter. + rd *report.Data w io.Writer } -func NewJSON(rd *report.Data, w io.Writer) *JSON { +func NewJSON(w io.Writer, rd *report.Data) *JSON { return &JSON{ rd: rd, w: w, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go index 7d0a703b0..587cef4e2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -4,55 +4,30 @@ import ( "encoding/xml" "fmt" "io" - "sort" + "maps" + "slices" "strings" - "golang.org/x/exp/maps" - "github.com/golangci/golangci-lint/pkg/result" ) -type testSuitesXML struct { - XMLName xml.Name `xml:"testsuites"` - TestSuites []testSuiteXML -} - -type testSuiteXML struct { - XMLName xml.Name `xml:"testsuite"` - Suite string `xml:"name,attr"` - Tests int `xml:"tests,attr"` - Errors int `xml:"errors,attr"` - Failures int `xml:"failures,attr"` - TestCases []testCaseXML `xml:"testcase"` -} - -type testCaseXML struct { - Name string `xml:"name,attr"` - ClassName string `xml:"classname,attr"` - Failure failureXML `xml:"failure"` - File string `xml:"file,attr,omitempty"` - Line int `xml:"line,attr,omitempty"` -} - -type failureXML struct { - Message string `xml:"message,attr"` - Type string `xml:"type,attr"` - Content string `xml:",cdata"` -} - -type JunitXML struct { +// JUnitXML prints issues in the JUnit XML format. +// There is no official specification for the JUnit XML file format, +// and various tools generate and support different flavors of this format. 
+// https://github.com/testmoapp/junitxml +type JUnitXML struct { extended bool w io.Writer } -func NewJunitXML(extended bool, w io.Writer) *JunitXML { - return &JunitXML{ +func NewJUnitXML(w io.Writer, extended bool) *JUnitXML { + return &JUnitXML{ extended: extended, w: w, } } -func (p JunitXML) Print(issues []result.Issue) error { +func (p JUnitXML) Print(issues []result.Issue) error { suites := make(map[string]testSuiteXML) // use a map to group by file for ind := range issues { @@ -84,10 +59,9 @@ func (p JunitXML) Print(issues []result.Issue) error { } var res testSuitesXML - res.TestSuites = maps.Values(suites) - sort.Slice(res.TestSuites, func(i, j int) bool { - return res.TestSuites[i].Suite < res.TestSuites[j].Suite + res.TestSuites = slices.SortedFunc(maps.Values(suites), func(a testSuiteXML, b testSuiteXML) int { + return strings.Compare(a.Suite, b.Suite) }) enc := xml.NewEncoder(p.w) @@ -97,3 +71,31 @@ func (p JunitXML) Print(issues []result.Issue) error { } return nil } + +type testSuitesXML struct { + XMLName xml.Name `xml:"testsuites"` + TestSuites []testSuiteXML +} + +type testSuiteXML struct { + XMLName xml.Name `xml:"testsuite"` + Suite string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Errors int `xml:"errors,attr"` + Failures int `xml:"failures,attr"` + TestCases []testCaseXML `xml:"testcase"` +} + +type testCaseXML struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + Failure failureXML `xml:"failure"` + File string `xml:"file,attr,omitempty"` + Line int `xml:"line,attr,omitempty"` +} + +type failureXML struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` + Content string `xml:",cdata"` +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go index 20be02e01..613e4abec 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go @@ -6,6 +6,8 @@ import ( "io" "os" "path/filepath" + "slices" + "strings" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/logutils" @@ -23,6 +25,7 @@ type issuePrinter interface { type Printer struct { cfg *config.Output reportData *report.Data + basePath string log logutils.Log @@ -31,7 +34,7 @@ type Printer struct { } // NewPrinter creates a new Printer. 
-func NewPrinter(log logutils.Log, cfg *config.Output, reportData *report.Data) (*Printer, error) { +func NewPrinter(log logutils.Log, cfg *config.Output, reportData *report.Data, basePath string) (*Printer, error) { if log == nil { return nil, errors.New("missing log argument in constructor") } @@ -45,6 +48,7 @@ func NewPrinter(log logutils.Log, cfg *config.Output, reportData *report.Data) ( return &Printer{ cfg: cfg, reportData: reportData, + basePath: basePath, log: log, stdOut: logutils.StdOut, stdErr: logutils.StdErr, @@ -96,6 +100,10 @@ func (c *Printer) createWriter(path string) (io.Writer, bool, error) { return c.stdErr, false, nil } + if !filepath.IsAbs(path) { + path = filepath.Join(c.basePath, path) + } + err := os.MkdirAll(filepath.Dir(path), os.ModePerm) if err != nil { return nil, false, err @@ -114,32 +122,63 @@ func (c *Printer) createPrinter(format string, w io.Writer) (issuePrinter, error switch format { case config.OutFormatJSON: - p = NewJSON(c.reportData, w) + p = NewJSON(w, c.reportData) case config.OutFormatLineNumber, config.OutFormatColoredLineNumber: - p = NewText(c.cfg.PrintIssuedLine, - format == config.OutFormatColoredLineNumber, c.cfg.PrintLinterName, - c.log.Child(logutils.DebugKeyTextPrinter), w) + p = NewText(c.log, w, c.cfg.PrintLinterName, c.cfg.PrintIssuedLine, format == config.OutFormatColoredLineNumber) case config.OutFormatTab, config.OutFormatColoredTab: - p = NewTab(c.cfg.PrintLinterName, - format == config.OutFormatColoredTab, - c.log.Child(logutils.DebugKeyTabPrinter), w) + p = NewTab(c.log, w, c.cfg.PrintLinterName, format == config.OutFormatColoredTab) case config.OutFormatCheckstyle: - p = NewCheckstyle(w) + p = NewCheckstyle(c.log, w) case config.OutFormatCodeClimate: - p = NewCodeClimate(w) + p = NewCodeClimate(c.log, w) case config.OutFormatHTML: p = NewHTML(w) - case config.OutFormatJunitXML, config.OutFormatJunitXMLExtended: - p = NewJunitXML(format == config.OutFormatJunitXMLExtended, w) + case config.OutFormatJUnitXML, config.OutFormatJUnitXMLExtended: + p = NewJUnitXML(w, format == config.OutFormatJUnitXMLExtended) case config.OutFormatGithubActions: p = NewGitHubAction(w) case config.OutFormatTeamCity: - p = NewTeamCity(w) + p = NewTeamCity(c.log, w) case config.OutFormatSarif: - p = NewSarif(w) + p = NewSarif(c.log, w) default: return nil, fmt.Errorf("unknown output format %q", format) } return p, nil } + +type severitySanitizer struct { + allowedSeverities []string + defaultSeverity string + + unsupportedSeverities map[string]struct{} +} + +func (s *severitySanitizer) Sanitize(severity string) string { + if slices.Contains(s.allowedSeverities, severity) { + return severity + } + + if s.unsupportedSeverities == nil { + s.unsupportedSeverities = make(map[string]struct{}) + } + + s.unsupportedSeverities[severity] = struct{}{} + + return s.defaultSeverity +} + +func (s *severitySanitizer) Err() error { + if len(s.unsupportedSeverities) == 0 { + return nil + } + + var names []string + for k := range s.unsupportedSeverities { + names = append(names, "'"+k+"'") + } + + return fmt.Errorf("severities (%v) are not inside supported values (%v), fallback to '%s'", + strings.Join(names, ", "), strings.Join(s.allowedSeverities, ", "), s.defaultSeverity) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/sarif.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/sarif.go index 8b1dd2ee2..c06c11624 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/sarif.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/printers/sarif.go @@ -4,6 +4,7 @@ import ( "encoding/json" "io" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -12,6 +13,73 @@ const ( sarifSchemaURI = "https://schemastore.azurewebsites.net/schemas/json/sarif-2.1.0-rtm.6.json" ) +const defaultSarifSeverity = "error" + +// Sarif prints issues in the SARIF format. +// https://sarifweb.azurewebsites.net/ +// https://docs.oasis-open.org/sarif/sarif/v2.1.0/ +type Sarif struct { + log logutils.Log + w io.Writer + sanitizer severitySanitizer +} + +func NewSarif(log logutils.Log, w io.Writer) *Sarif { + return &Sarif{ + log: log.Child(logutils.DebugKeySarifPrinter), + w: w, + sanitizer: severitySanitizer{ + // https://docs.oasis-open.org/sarif/sarif/v2.1.0/errata01/os/sarif-v2.1.0-errata01-os-complete.html#_Toc141790898 + allowedSeverities: []string{"none", "note", "warning", defaultSarifSeverity}, + defaultSeverity: defaultSarifSeverity, + }, + } +} + +func (p *Sarif) Print(issues []result.Issue) error { + run := sarifRun{} + run.Tool.Driver.Name = "golangci-lint" + run.Results = make([]sarifResult, 0) + + for i := range issues { + issue := issues[i] + + sr := sarifResult{ + RuleID: issue.FromLinter, + Level: p.sanitizer.Sanitize(issue.Severity), + Message: sarifMessage{Text: issue.Text}, + Locations: []sarifLocation{ + { + PhysicalLocation: sarifPhysicalLocation{ + ArtifactLocation: sarifArtifactLocation{URI: issue.FilePath()}, + Region: sarifRegion{ + StartLine: issue.Line(), + // If startColumn is absent, it SHALL default to 1. + // https://docs.oasis-open.org/sarif/sarif/v2.1.0/errata01/os/sarif-v2.1.0-errata01-os-complete.html#_Toc141790941 + StartColumn: max(1, issue.Column()), + }, + }, + }, + }, + } + + run.Results = append(run.Results, sr) + } + + err := p.sanitizer.Err() + if err != nil { + p.log.Infof("%v", err) + } + + output := SarifOutput{ + Version: sarifVersion, + Schema: sarifSchemaURI, + Runs: []sarifRun{run}, + } + + return json.NewEncoder(p.w).Encode(output) +} + type SarifOutput struct { Version string `json:"version"` Schema string `json:"$schema"` @@ -58,60 +126,3 @@ type sarifRegion struct { StartLine int `json:"startLine"` StartColumn int `json:"startColumn"` } - -type Sarif struct { - w io.Writer -} - -func NewSarif(w io.Writer) *Sarif { - return &Sarif{w: w} -} - -func (p Sarif) Print(issues []result.Issue) error { - run := sarifRun{} - run.Tool.Driver.Name = "golangci-lint" - run.Results = make([]sarifResult, 0) - - for i := range issues { - issue := issues[i] - - severity := issue.Severity - - switch severity { - // https://docs.oasis-open.org/sarif/sarif/v2.1.0/errata01/os/sarif-v2.1.0-errata01-os-complete.html#_Toc141790898 - case "none", "note", "warning", "error": - // Valid levels. - default: - severity = "error" - } - - sr := sarifResult{ - RuleID: issue.FromLinter, - Level: severity, - Message: sarifMessage{Text: issue.Text}, - Locations: []sarifLocation{ - { - PhysicalLocation: sarifPhysicalLocation{ - ArtifactLocation: sarifArtifactLocation{URI: issue.FilePath()}, - Region: sarifRegion{ - StartLine: issue.Line(), - // If startColumn is absent, it SHALL default to 1. 
- // https://docs.oasis-open.org/sarif/sarif/v2.1.0/errata01/os/sarif-v2.1.0-errata01-os-complete.html#_Toc141790941 - StartColumn: max(1, issue.Column()), - }, - }, - }, - }, - } - - run.Results = append(run.Results, sr) - } - - output := SarifOutput{ - Version: sarifVersion, - Schema: sarifSchemaURI, - Runs: []sarifRun{run}, - } - - return json.NewEncoder(p.w).Encode(output) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go index c6d390d18..ac04ab0fb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -11,6 +11,7 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +// Tab prints issues using tabulation as a field separator. type Tab struct { printLinterName bool useColors bool @@ -19,11 +20,11 @@ type Tab struct { w io.Writer } -func NewTab(printLinterName, useColors bool, log logutils.Log, w io.Writer) *Tab { +func NewTab(log logutils.Log, w io.Writer, printLinterName, useColors bool) *Tab { return &Tab{ printLinterName: printLinterName, useColors: useColors, - log: log, + log: log.Child(logutils.DebugKeyTabPrinter), w: w, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go index 1d1c9f7d3..9ff5fe5bc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go @@ -4,8 +4,8 @@ import ( "fmt" "io" "strings" - "unicode/utf8" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -15,16 +15,22 @@ const ( largeLimit = 4000 ) -// TeamCity printer for TeamCity format. +const defaultTeamCitySeverity = "ERROR" + +// TeamCity prints issues in the TeamCity format. +// https://www.jetbrains.com/help/teamcity/service-messages.html type TeamCity struct { - w io.Writer - escaper *strings.Replacer + log logutils.Log + w io.Writer + escaper *strings.Replacer + sanitizer severitySanitizer } // NewTeamCity output format outputs issues according to TeamCity service message format. 
-func NewTeamCity(w io.Writer) *TeamCity { +func NewTeamCity(log logutils.Log, w io.Writer) *TeamCity { return &TeamCity{ - w: w, + log: log.Child(logutils.DebugKeyTeamCityPrinter), + w: w, // https://www.jetbrains.com/help/teamcity/service-messages.html#Escaped+Values escaper: strings.NewReplacer( "'", "|'", @@ -34,6 +40,11 @@ func NewTeamCity(w io.Writer) *TeamCity { "[", "|[", "]", "|]", ), + sanitizer: severitySanitizer{ + // https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Instance + allowedSeverities: []string{"INFO", defaultTeamCitySeverity, "WARNING", "WEAK WARNING"}, + defaultSeverity: defaultTeamCitySeverity, + }, } } @@ -65,7 +76,7 @@ func (p *TeamCity) Print(issues []result.Issue) error { message: issue.Text, file: issue.FilePath(), line: issue.Line(), - severity: issue.Severity, + severity: p.sanitizer.Sanitize(strings.ToUpper(issue.Severity)), } _, err := instance.Print(p.w, p.escaper) @@ -74,6 +85,11 @@ func (p *TeamCity) Print(issues []result.Issue) error { } } + err := p.sanitizer.Err() + if err != nil { + p.log.Infof("%v", err) + } + return nil } @@ -108,15 +124,13 @@ func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int, cutVal(i.typeID, smallLimit), cutVal(replacer.Replace(i.message), largeLimit), cutVal(i.file, largeLimit), - i.line, strings.ToUpper(i.severity)) + i.line, i.severity) } func cutVal(s string, limit int) string { - var size, count int - for i := 0; i < limit && count < len(s); i++ { - _, size = utf8.DecodeRuneInString(s[count:]) - count += size + runes := []rune(s) + if len(runes) > limit { + return string(runes[:limit]) } - - return s[:count] + return s } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go index 56cced769..9e60408f0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go @@ -11,21 +11,22 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +// Text prints issues with a human friendly representation. 
type Text struct { - printIssuedLine bool printLinterName bool + printIssuedLine bool useColors bool log logutils.Log w io.Writer } -func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log, w io.Writer) *Text { +func NewText(log logutils.Log, w io.Writer, printLinterName, printIssuedLine, useColors bool) *Text { return &Text{ - printIssuedLine: printIssuedLine, printLinterName: printLinterName, + printIssuedLine: printIssuedLine, useColors: useColors, - log: log, + log: log.Child(logutils.DebugKeyTextPrinter), w: w, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 32246a6df..86a4ef3b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -5,6 +5,7 @@ import ( "fmt" "go/token" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" ) @@ -12,18 +13,6 @@ type Range struct { From, To int } -type Replacement struct { - NeedOnlyDelete bool // need to delete all lines of the issue without replacement with new lines - NewLines []string // if NeedDelete is false it's the replacement lines - Inline *InlineFix -} - -type InlineFix struct { - StartCol int // zero-based - Length int // length of chunk to be replaced - NewString string -} - type Issue struct { FromLinter string Text string @@ -33,22 +22,28 @@ type Issue struct { // Source lines of a code with the issue to show SourceLines []string - // If we know how to fix the issue we can provide replacement lines - Replacement *Replacement - // Pkg is needed for proper caching of linting results Pkg *packages.Package `json:"-"` - LineRange *Range `json:",omitempty"` - Pos token.Position + LineRange *Range `json:",omitempty"` + // HunkPos is used only when golangci-lint is run over a diff HunkPos int `json:",omitempty"` + // If we know how to fix the issue we can provide replacement lines + SuggestedFixes []analysis.SuggestedFix `json:",omitempty"` + // If we are expecting a nolint (because this is from nolintlint), record the expected linter ExpectNoLint bool ExpectedNoLintLinter string + + // Only for Diff processor needs. + WorkingDirectoryRelativePath string `json:"-"` + + // Only for processor that need relative paths evaluation. + RelativePath string `json:"-"` } func (i *Issue) FilePath() string { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go index d7a4f0ec4..72dc20284 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go @@ -3,6 +3,7 @@ package processors import ( "regexp" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" @@ -18,6 +19,32 @@ type baseRule struct { linters []string } +// The usage of `regexp.MustCompile()` is safe here, +// because the regular expressions are checked before inside [config.BaseRule.Validate]. 
+func newBaseRule(rule *config.BaseRule, prefix string) baseRule { + base := baseRule{ + linters: rule.Linters, + } + + if rule.Text != "" { + base.text = regexp.MustCompile(prefix + rule.Text) + } + + if rule.Source != "" { + base.source = regexp.MustCompile(prefix + rule.Source) + } + + if rule.Path != "" { + base.path = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.Path)) + } + + if rule.PathExcept != "" { + base.pathExcept = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.PathExcept)) + } + + return base +} + func (r *baseRule) isEmpty() bool { return r.text == nil && r.source == nil && r.path == nil && r.pathExcept == nil && len(r.linters) == 0 } @@ -29,10 +56,10 @@ func (r *baseRule) match(issue *result.Issue, files *fsutils.Files, log logutils if r.text != nil && !r.text.MatchString(issue.Text) { return false } - if r.path != nil && !r.path.MatchString(files.WithPathPrefix(issue.FilePath())) { + if r.path != nil && !r.path.MatchString(files.WithPathPrefix(issue.RelativePath)) { return false } - if r.pathExcept != nil && r.pathExcept.MatchString(issue.FilePath()) { + if r.pathExcept != nil && r.pathExcept.MatchString(issue.RelativePath) { return false } if len(r.linters) != 0 && !r.matchLinter(issue) { @@ -58,11 +85,25 @@ func (r *baseRule) matchLinter(issue *result.Issue) bool { } func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { - sourceLine, errSourceLine := lineCache.GetLine(issue.FilePath(), issue.Line()) + sourceLine, errSourceLine := lineCache.GetLine(issue.RelativePath, issue.Line()) if errSourceLine != nil { - log.Warnf("Failed to get line %s:%d from line cache: %s", issue.FilePath(), issue.Line(), errSourceLine) + log.Warnf("Failed to get line %s:%d from line cache: %s", issue.RelativePath, issue.Line(), errSourceLine) return false // can't properly match } return r.source.MatchString(sourceLine) } + +func parseRules[T, V any](rules []T, prefix string, newFn func(*T, string) V) []V { + if len(rules) == 0 { + return nil + } + + parsedRules := make([]V, 0, len(rules)) + + for _, r := range rules { + parsedRules = append(parsedRules, newFn(&r, prefix)) + } + + return parsedRules +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go index 0e659f0f3..b09b8b728 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go @@ -1,52 +1,45 @@ package processors import ( - "fmt" "path/filepath" "strings" + "github.com/ldez/grignotin/goenv" + "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/result" ) var _ Processor = (*Cgo)(nil) +// Cgo filters cgo artifacts. +// +// Some linters (e.g. gosec, etc.) return incorrect file paths for cgo files. +// +// Require absolute file path. type Cgo struct { goCacheDir string } -func NewCgo(goenv *goutil.Env) *Cgo { +func NewCgo(env *goutil.Env) *Cgo { return &Cgo{ - goCacheDir: goenv.Get(goutil.EnvGoCache), + goCacheDir: env.Get(goenv.GOCACHE), } } -func (Cgo) Name() string { +func (*Cgo) Name() string { return "cgo" } -func (p Cgo) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *Cgo) Process(issues []result.Issue) ([]result.Issue, error) { return filterIssuesErr(issues, p.shouldPassIssue) } -func (Cgo) Finish() {} - -func (p Cgo) shouldPassIssue(issue *result.Issue) (bool, error) { - // some linters (e.g. 
gosec, deadcode) return incorrect filepaths for cgo issues, - // also cgo files have strange issues looking like false positives. - - // cache dir contains all preprocessed files including cgo files - - issueFilePath := issue.FilePath() - if !filepath.IsAbs(issue.FilePath()) { - absPath, err := filepath.Abs(issue.FilePath()) - if err != nil { - return false, fmt.Errorf("failed to build abs path for %q: %w", issue.FilePath(), err) - } - issueFilePath = absPath - } +func (*Cgo) Finish() {} - if p.goCacheDir != "" && strings.HasPrefix(issueFilePath, p.goCacheDir) { +func (p *Cgo) shouldPassIssue(issue *result.Issue) (bool, error) { + // [p.goCacheDir] contains all preprocessed files including cgo files. + if p.goCacheDir != "" && strings.HasPrefix(issue.FilePath(), p.goCacheDir) { return false, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go index c602cdc65..a7e268b4b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go @@ -2,6 +2,7 @@ package processors import ( "bytes" + "context" "fmt" "io" "os" @@ -17,9 +18,16 @@ const envGolangciDiffProcessorPatch = "GOLANGCI_DIFF_PROCESSOR_PATCH" var _ Processor = (*Diff)(nil) +// Diff filters issues based on options `new`, `new-from-rev`, etc. +// +// Uses `git`. +// The paths inside the patch are relative to the path where git is run (the same location where golangci-lint is run). +// +// Warning: it doesn't use `path-prefix` option. type Diff struct { onlyNew bool fromRev string + fromMergeBase string patchFilePath string wholeFiles bool patch string @@ -29,38 +37,45 @@ func NewDiff(cfg *config.Issues) *Diff { return &Diff{ onlyNew: cfg.Diff, fromRev: cfg.DiffFromRevision, + fromMergeBase: cfg.DiffFromMergeBase, patchFilePath: cfg.DiffPatchFilePath, wholeFiles: cfg.WholeFiles, patch: os.Getenv(envGolangciDiffProcessorPatch), } } -func (Diff) Name() string { +func (*Diff) Name() string { return "diff" } -func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { - if !p.onlyNew && p.fromRev == "" && p.patchFilePath == "" && p.patch == "" { // no need to work +func (p *Diff) Process(issues []result.Issue) ([]result.Issue, error) { + if !p.onlyNew && p.fromRev == "" && p.fromMergeBase == "" && p.patchFilePath == "" && p.patch == "" { return issues, nil } var patchReader io.Reader - if p.patchFilePath != "" { + switch { + case p.patchFilePath != "": patch, err := os.ReadFile(p.patchFilePath) if err != nil { return nil, fmt.Errorf("can't read from patch file %s: %w", p.patchFilePath, err) } + patchReader = bytes.NewReader(patch) - } else if p.patch != "" { + + case p.patch != "": patchReader = strings.NewReader(p.patch) } - c := revgrep.Checker{ + checker := revgrep.Checker{ Patch: patchReader, RevisionFrom: p.fromRev, + MergeBase: p.fromMergeBase, WholeFiles: p.wholeFiles, } - if err := c.Prepare(); err != nil { + + err := checker.Prepare(context.Background()) + if err != nil { return nil, fmt.Errorf("can't prepare diff by revgrep: %w", err) } @@ -70,15 +85,16 @@ func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { return issue } - hunkPos, isNew := c.IsNewIssue(issue) + hunkPos, isNew := checker.IsNew(issue.WorkingDirectoryRelativePath, issue.Line()) if !isNew { return nil } newIssue := *issue newIssue.HunkPos = hunkPos + return &newIssue }), nil } -func (Diff) Finish() {} +func (*Diff) Finish() {} diff 
--git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go deleted file mode 100644 index 543120450..000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude.go +++ /dev/null @@ -1,55 +0,0 @@ -package processors - -import ( - "fmt" - "regexp" - "strings" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/result" -) - -var _ Processor = (*Exclude)(nil) - -type Exclude struct { - name string - - pattern *regexp.Regexp -} - -func NewExclude(cfg *config.Issues) *Exclude { - p := &Exclude{name: "exclude"} - - var pattern string - if len(cfg.ExcludePatterns) != 0 { - pattern = fmt.Sprintf("(%s)", strings.Join(cfg.ExcludePatterns, "|")) - } - - prefix := caseInsensitivePrefix - if cfg.ExcludeCaseSensitive { - p.name = "exclude-case-sensitive" - prefix = "" - } - - if pattern != "" { - p.pattern = regexp.MustCompile(prefix + pattern) - } - - return p -} - -func (p Exclude) Name() string { - return p.name -} - -func (p Exclude) Process(issues []result.Issue) ([]result.Issue, error) { - if p.pattern == nil { - return issues, nil - } - - return filterIssues(issues, func(issue *result.Issue) bool { - return !p.pattern.MatchString(issue.Text) - }), nil -} - -func (Exclude) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go deleted file mode 100644 index bf255ae82..000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go +++ /dev/null @@ -1,105 +0,0 @@ -package processors - -import ( - "regexp" - - "github.com/golangci/golangci-lint/pkg/config" - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" -) - -var _ Processor = (*ExcludeRules)(nil) - -type excludeRule struct { - baseRule -} - -type ExcludeRules struct { - name string - - log logutils.Log - files *fsutils.Files - - rules []excludeRule -} - -func NewExcludeRules(log logutils.Log, files *fsutils.Files, cfg *config.Issues) *ExcludeRules { - p := &ExcludeRules{ - name: "exclude-rules", - files: files, - log: log, - } - - prefix := caseInsensitivePrefix - if cfg.ExcludeCaseSensitive { - prefix = "" - p.name = "exclude-rules-case-sensitive" - } - - excludeRules := cfg.ExcludeRules - - if cfg.UseDefaultExcludes { - for _, r := range config.GetExcludePatterns(cfg.IncludeDefaultExcludes) { - excludeRules = append(excludeRules, config.ExcludeRule{ - BaseRule: config.BaseRule{ - Text: r.Pattern, - Linters: []string{r.Linter}, - }, - }) - } - } - - p.rules = createRules(excludeRules, prefix) - - return p -} - -func (p ExcludeRules) Name() string { return p.name } - -func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) { - if len(p.rules) == 0 { - return issues, nil - } - - return filterIssues(issues, func(issue *result.Issue) bool { - for _, rule := range p.rules { - if rule.match(issue, p.files, p.log) { - return false - } - } - - return true - }), nil -} - -func (ExcludeRules) Finish() {} - -func createRules(rules []config.ExcludeRule, prefix string) []excludeRule { - parsedRules := make([]excludeRule, 0, len(rules)) - - for _, rule := range rules { - parsedRule := excludeRule{} - parsedRule.linters = rule.Linters - - if rule.Text != "" { - parsedRule.text = regexp.MustCompile(prefix + rule.Text) - 
} - - if rule.Source != "" { - parsedRule.source = regexp.MustCompile(prefix + rule.Source) - } - - if rule.Path != "" { - parsedRule.path = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.Path)) - } - - if rule.PathExcept != "" { - parsedRule.pathExcept = regexp.MustCompile(fsutils.NormalizePathInRegex(rule.PathExcept)) - } - - parsedRules = append(parsedRules, parsedRule) - } - - return parsedRules -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_generated_file_filter.go similarity index 82% rename from vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go rename to vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_generated_file_filter.go index 82316f6a0..ce4e2e214 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_generated_file_filter.go @@ -33,13 +33,17 @@ const ( genSwaggerCodegen = "* generated by: swagger codegen " ) -var _ Processor = (*AutogeneratedExclude)(nil) +var _ Processor = (*GeneratedFileFilter)(nil) type fileSummary struct { generated bool } -type AutogeneratedExclude struct { +// GeneratedFileFilter filters generated files. +// - mode "lax": see `isGeneratedFileLax` documentation. +// - mode "strict": see `isGeneratedFileStrict` documentation. +// - mode "disable": skips this processor. +type GeneratedFileFilter struct { debugf logutils.DebugFunc mode string @@ -48,20 +52,20 @@ type AutogeneratedExclude struct { fileSummaryCache map[string]*fileSummary } -func NewAutogeneratedExclude(mode string) *AutogeneratedExclude { - return &AutogeneratedExclude{ - debugf: logutils.Debug(logutils.DebugKeyAutogenExclude), +func NewGeneratedFileFilter(mode string) *GeneratedFileFilter { + return &GeneratedFileFilter{ + debugf: logutils.Debug(logutils.DebugKeyGeneratedFileFilter), mode: mode, strictPattern: regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`), fileSummaryCache: map[string]*fileSummary{}, } } -func (*AutogeneratedExclude) Name() string { - return "autogenerated_exclude" +func (*GeneratedFileFilter) Name() string { + return "generated_file_filter" } -func (p *AutogeneratedExclude) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *GeneratedFileFilter) Process(issues []result.Issue) ([]result.Issue, error) { if p.mode == AutogeneratedModeDisable { return issues, nil } @@ -69,9 +73,9 @@ func (p *AutogeneratedExclude) Process(issues []result.Issue) ([]result.Issue, e return filterIssuesErr(issues, p.shouldPassIssue) } -func (*AutogeneratedExclude) Finish() {} +func (*GeneratedFileFilter) Finish() {} -func (p *AutogeneratedExclude) shouldPassIssue(issue *result.Issue) (bool, error) { +func (p *GeneratedFileFilter) shouldPassIssue(issue *result.Issue) (bool, error) { if filepath.Base(issue.FilePath()) == "go.mod" { return true, nil } @@ -109,7 +113,7 @@ func (p *AutogeneratedExclude) shouldPassIssue(issue *result.Issue) (bool, error // isGeneratedFileLax reports whether the source file is generated code. // The function uses a bit laxer rules than isGeneratedFileStrict to match more generated code. // See https://github.com/golangci/golangci-lint/issues/48 and https://github.com/golangci/golangci-lint/issues/72. 
-func (p *AutogeneratedExclude) isGeneratedFileLax(doc string) bool { +func (p *GeneratedFileFilter) isGeneratedFileLax(doc string) bool { markers := []string{genCodeGenerated, genDoNotEdit, genAutoFile, genSwaggerCodegen} doc = strings.ToLower(doc) @@ -133,7 +137,7 @@ func (p *AutogeneratedExclude) isGeneratedFileLax(doc string) bool { // // This line must appear before the first non-comment, non-blank text in the file. // Based on https://go.dev/s/generatedcode. -func (p *AutogeneratedExclude) isGeneratedFileStrict(filePath string) (bool, error) { +func (p *GeneratedFileFilter) isGeneratedFileStrict(filePath string) (bool, error) { file, err := parser.ParseFile(token.NewFileSet(), filePath, nil, parser.PackageClauseOnly|parser.ParseComments) if err != nil { return false, fmt.Errorf("failed to parse file: %w", err) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_paths.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_paths.go new file mode 100644 index 000000000..bd4304870 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_paths.go @@ -0,0 +1,118 @@ +package processors + +import ( + "fmt" + "regexp" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var _ Processor = (*ExclusionPaths)(nil) + +type ExclusionPaths struct { + pathPatterns []*regexp.Regexp + pathExceptPatterns []*regexp.Regexp + + warnUnused bool + excludedPathCounter map[*regexp.Regexp]int + excludedPathExceptCounter map[*regexp.Regexp]int + + log logutils.Log +} + +func NewExclusionPaths(log logutils.Log, cfg *config.LinterExclusions) (*ExclusionPaths, error) { + excludedPathCounter := make(map[*regexp.Regexp]int) + + var pathPatterns []*regexp.Regexp + for _, p := range cfg.Paths { + p = fsutils.NormalizePathInRegex(p) + + patternRe, err := regexp.Compile(p) + if err != nil { + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) + } + + pathPatterns = append(pathPatterns, patternRe) + excludedPathCounter[patternRe] = 0 + } + + excludedPathExceptCounter := make(map[*regexp.Regexp]int) + + var pathExceptPatterns []*regexp.Regexp + for _, p := range cfg.PathsExcept { + p = fsutils.NormalizePathInRegex(p) + + patternRe, err := regexp.Compile(p) + if err != nil { + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) + } + + pathExceptPatterns = append(pathExceptPatterns, patternRe) + excludedPathExceptCounter[patternRe] = 0 + } + + return &ExclusionPaths{ + pathPatterns: pathPatterns, + pathExceptPatterns: pathExceptPatterns, + warnUnused: cfg.WarnUnused, + excludedPathCounter: excludedPathCounter, + excludedPathExceptCounter: excludedPathExceptCounter, + log: log.Child(logutils.DebugKeyExclusionPaths), + }, nil +} + +func (*ExclusionPaths) Name() string { + return "exclusion_paths" +} + +func (p *ExclusionPaths) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.pathPatterns) == 0 && len(p.pathExceptPatterns) == 0 { + return issues, nil + } + + return filterIssues(issues, p.shouldPassIssue), nil +} + +func (p *ExclusionPaths) Finish() { + for pattern, count := range p.excludedPathCounter { + if p.warnUnused && count == 0 { + p.log.Warnf("The pattern %q match %d issues", pattern, count) + } else { + p.log.Infof("Skipped %d issues by pattern %q", count, pattern) + } + } + + for pattern, count := range p.excludedPathExceptCounter { + 
if p.warnUnused && count == 0 { + p.log.Warnf("The pattern %q match %d issues", pattern, count) + } + } +} + +func (p *ExclusionPaths) shouldPassIssue(issue *result.Issue) bool { + for _, pattern := range p.pathPatterns { + if pattern.MatchString(issue.RelativePath) { + p.excludedPathCounter[pattern]++ + return false + } + } + + if len(p.pathExceptPatterns) == 0 { + return true + } + + matched := false + for _, pattern := range p.pathExceptPatterns { + if !pattern.MatchString(issue.RelativePath) { + continue + } + + p.excludedPathExceptCounter[pattern]++ + matched = true + } + + return !matched +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_presets.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_presets.go new file mode 100644 index 000000000..17299b90c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_presets.go @@ -0,0 +1,138 @@ +package processors + +import "github.com/golangci/golangci-lint/pkg/config" + +var linterExclusionPresets = map[string][]config.ExcludeRule{ + config.ExclusionPresetComments: { + { + // Annoying issue about not having a comment. The rare codebase has such comments. + // CheckPackageComment, CheckExportedFunctionDocs, CheckExportedTypeDocs, CheckExportedVarDocs + BaseRule: config.BaseRule{ + Text: "(ST1000|ST1020|ST1021|ST1022)", + Linters: []string{"stylecheck"}, + InternalReference: "EXC0011", + }, + }, + { + // Annoying issue about not having a comment. The rare codebase has such comments. + // rule: exported + BaseRule: config.BaseRule{ + Text: `exported (.+) should have comment( \(or a comment on this block\))? or be unexported`, + Linters: []string{"revive"}, + InternalReference: "EXC0012", + }, + }, + { + // Annoying issue about not having a comment. The rare codebase has such comments. + // rule: package-comments + BaseRule: config.BaseRule{ + Text: `package comment should be of the form "(.+)..."`, + Linters: []string{"revive"}, + InternalReference: "EXC0013", + }, + }, + { + // Annoying issue about not having a comment. The rare codebase has such comments. + // rule: exported + BaseRule: config.BaseRule{ + Text: `comment on exported (.+) should be of the form "(.+)..."`, + Linters: []string{"revive"}, + InternalReference: "EXC0014", + }, + }, + { + // Annoying issue about not having a comment. The rare codebase has such comments. + // rule: package-comments + BaseRule: config.BaseRule{ + Text: `should have a package comment`, + Linters: []string{"revive"}, + InternalReference: "EXC0015", + }, + }, + }, + config.ExclusionPresetStdErrorHandling: { + { + // Almost all programs ignore errors on these functions and in most cases it's ok. + BaseRule: config.BaseRule{ + Text: "Error return value of .((os\\.)?std(out|err)\\..*|.*Close" + + "|.*Flush|os\\.Remove(All)?|.*print(f|ln)?|os\\.(Un)?Setenv). is not checked", + Linters: []string{"errcheck"}, + InternalReference: "EXC0001", + }, + }, + }, + config.ExclusionPresetCommonFalsePositives: { + { + // Too many false-positives on 'unsafe' usage. + BaseRule: config.BaseRule{ + Text: "G103: Use of unsafe calls should be audited", + Linters: []string{"gosec"}, + InternalReference: "EXC0006", + }, + }, + { + // Too many false-positives for parametrized shell calls. + BaseRule: config.BaseRule{ + Text: "G204: Subprocess launched with variable", + Linters: []string{"gosec"}, + InternalReference: "EXC0007", + }, + }, + { + // False positive is triggered by 'src, err := ioutil.ReadFile(filename)'. 
+ BaseRule: config.BaseRule{ + Text: "G304: Potential file inclusion via variable", + Linters: []string{"gosec"}, + InternalReference: "EXC0010", + }, + }, + }, + config.ExclusionPresetLegacy: { + { + // Common false positives. + BaseRule: config.BaseRule{ + Text: "(possible misuse of unsafe.Pointer|should have signature)", + Linters: []string{"govet"}, + InternalReference: "EXC0004", + }, + }, + { + // Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore. + // CheckScopedBreak + BaseRule: config.BaseRule{ + Text: "SA4011", + Linters: []string{"staticcheck"}, + InternalReference: "EXC0005", + }, + }, + { + // Duplicated errcheck checks. + // Errors unhandled. + BaseRule: config.BaseRule{ + Text: "G104", + Linters: []string{"gosec"}, + InternalReference: "EXC0008", + }, + }, + { + // Too many issues in popular repos. + BaseRule: config.BaseRule{ + Text: "(G301|G302|G307): Expect (directory permissions to be 0750|file permissions to be 0600) or less", + Linters: []string{"gosec"}, + InternalReference: "EXC0009", + }, + }, + }, +} + +func getLinterExclusionPresets(names []string) []config.ExcludeRule { + var rules []config.ExcludeRule + + for _, name := range names { + if p, ok := linterExclusionPresets[name]; ok { + rules = append(rules, p...) + } + } + + return rules +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_rules.go new file mode 100644 index 000000000..7730a53dd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclusion_rules.go @@ -0,0 +1,164 @@ +package processors + +import ( + "fmt" + "slices" + "strings" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var _ Processor = (*ExclusionRules)(nil) + +type ExclusionRules struct { + log logutils.Log + files *fsutils.Files + + warnUnused bool + skippedCounter map[string]int + + rules []excludeRule +} + +func NewExclusionRules(log logutils.Log, files *fsutils.Files, + cfg *config.LinterExclusions, oldCfg *config.Issues) *ExclusionRules { + p := &ExclusionRules{ + log: log, + files: files, + warnUnused: cfg.WarnUnused, + skippedCounter: map[string]int{}, + } + + // TODO(ldez) remove prefix in v2: the matching must be case sensitive, users can add `(?i)` inside the patterns if needed. + prefix := caseInsensitivePrefix + if oldCfg.ExcludeCaseSensitive { + prefix = "" + } + + excludeRules := slices.Concat(slices.Clone(cfg.Rules), + filterInclude(getLinterExclusionPresets(cfg.Presets), oldCfg.IncludeDefaultExcludes)) + + p.rules = parseRules(excludeRules, prefix, newExcludeRule) + + // TODO(ldez): should be removed in v2. 
+ for _, pattern := range oldCfg.ExcludePatterns { + if pattern == "" { + continue + } + + r := &config.ExcludeRule{ + BaseRule: config.BaseRule{ + Path: `.+\.go`, + Text: pattern, + }, + } + + rule := newExcludeRule(r, prefix) + + p.rules = append(p.rules, rule) + } + + for _, rule := range p.rules { + if rule.internalReference == "" { + p.skippedCounter[rule.String()] = 0 + } + } + + return p +} + +func (*ExclusionRules) Name() string { + return "exclusion_rules" +} + +func (p *ExclusionRules) Process(issues []result.Issue) ([]result.Issue, error) { + if len(p.rules) == 0 { + return issues, nil + } + + return filterIssues(issues, func(issue *result.Issue) bool { + for _, rule := range p.rules { + if !rule.match(issue, p.files, p.log) { + continue + } + + // Ignore default rules. + if rule.internalReference == "" { + p.skippedCounter[rule.String()]++ + } + + return false + } + + return true + }), nil +} + +func (p *ExclusionRules) Finish() { + for rule, count := range p.skippedCounter { + if p.warnUnused && count == 0 { + p.log.Warnf("Skipped %d issues by rules: [%s]", count, rule) + } else { + p.log.Infof("Skipped %d issues by rules: [%s]", count, rule) + } + } +} + +type excludeRule struct { + baseRule + + // For compatibility with exclude-use-default/include. + internalReference string `mapstructure:"-"` +} + +func newExcludeRule(rule *config.ExcludeRule, prefix string) excludeRule { + return excludeRule{ + baseRule: newBaseRule(&rule.BaseRule, prefix), + internalReference: rule.InternalReference, + } +} + +func (e excludeRule) String() string { + var msg []string + + if e.text != nil && e.text.String() != "" { + msg = append(msg, fmt.Sprintf("Text: %q", e.text)) + } + + if e.source != nil && e.source.String() != "" { + msg = append(msg, fmt.Sprintf("Source: %q", e.source)) + } + + if e.path != nil && e.path.String() != "" { + msg = append(msg, fmt.Sprintf("Path: %q", e.path)) + } + + if e.pathExcept != nil && e.pathExcept.String() != "" { + msg = append(msg, fmt.Sprintf("Path Except: %q", e.pathExcept)) + } + + if len(e.linters) > 0 { + msg = append(msg, fmt.Sprintf("Linters: %q", strings.Join(e.linters, ", "))) + } + + return strings.Join(msg, ", ") +} + +// TODO(ldez): must be removed in v2, only for compatibility with exclude-use-default/include. +func filterInclude(rules []config.ExcludeRule, refs []string) []config.ExcludeRule { + if len(refs) == 0 { + return rules + } + + var filteredRules []config.ExcludeRule + for _, rule := range rules { + if !slices.Contains(refs, rule.InternalReference) { + filteredRules = append(filteredRules, rule) + } + } + + return filteredRules +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go index 6a1387c87..5f39e064b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go @@ -3,7 +3,6 @@ package processors import ( "go/parser" "go/token" - "path/filepath" "strings" "sync" "time" @@ -23,9 +22,13 @@ type adjustMap struct { m map[string]posMapper } -// FilenameUnadjuster is needed because a lot of linters use fset.Position(f.Pos()) -// to get filename. And they return adjusted filename (e.g. *.qtpl) for an issue. We need -// restore real .go filename to properly output it, parse it, etc. 
+// FilenameUnadjuster fixes filename based on adjusted and unadjusted position (related to line directives and cgo). +// +// A lot of linters use `fset.Position(f.Pos())` to get filename, +// and they return adjusted filename (e.g.` *.qtpl`) for an issue. +// We need restore real `.go` filename to properly output it, parse it, etc. +// +// Require absolute file path. type FilenameUnadjuster struct { m map[string]posMapper // map from adjusted filename to position mapper: adjusted -> unadjusted position log logutils.Log @@ -36,8 +39,10 @@ func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *Filename m := adjustMap{m: map[string]posMapper{}} startedAt := time.Now() + var wg sync.WaitGroup wg.Add(len(pkgs)) + for _, pkg := range pkgs { go func(pkg *packages.Package) { // It's important to call func here to run GC @@ -45,7 +50,9 @@ func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *Filename wg.Done() }(pkg) } + wg.Wait() + log.Infof("Pre-built %d adjustments in %s", len(m.m), time.Since(startedAt)) return &FilenameUnadjuster{ @@ -61,17 +68,7 @@ func (*FilenameUnadjuster) Name() string { func (p *FilenameUnadjuster) Process(issues []result.Issue) ([]result.Issue, error) { return transformIssues(issues, func(issue *result.Issue) *result.Issue { - issueFilePath := issue.FilePath() - if !filepath.IsAbs(issue.FilePath()) { - absPath, err := filepath.Abs(issue.FilePath()) - if err != nil { - p.log.Warnf("failed to build abs path for %q: %s", issue.FilePath(), err) - return issue - } - issueFilePath = absPath - } - - mapper := p.m[issueFilePath] + mapper := p.m[issue.FilePath()] if mapper == nil { return issue } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 764af5a92..610f249ef 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -1,16 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// This file is inspired by go/analysis/internal/checker/checker.go + package processors import ( - "bytes" + "errors" "fmt" + "maps" "os" - "path/filepath" - "sort" - "strings" + "slices" - "github.com/golangci/golangci-lint/internal/go/robustio" + "github.com/golangci/golangci-lint/internal/x/tools/diff" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/goformatters" + "github.com/golangci/golangci-lint/pkg/goformatters/gci" + "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" + "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" + "github.com/golangci/golangci-lint/pkg/goformatters/goimports" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" "github.com/golangci/golangci-lint/pkg/timeutils" @@ -18,19 +28,25 @@ import ( var _ Processor = (*Fixer)(nil) +const filePerm = 0644 + +// Fixer fixes reports if possible. +// The reports that are not fixed are passed to the next processor. 
type Fixer struct { cfg *config.Config log logutils.Log fileCache *fsutils.FileCache sw *timeutils.Stopwatch + formatter *goformatters.MetaFormatter } -func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer { +func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache, formatter *goformatters.MetaFormatter) *Fixer { return &Fixer{ cfg: cfg, log: log, fileCache: fileCache, sw: timeutils.NewStopwatch("fixer", log), + formatter: formatter, } } @@ -43,218 +59,246 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - outIssues := make([]result.Issue, 0, len(issues)) - issuesToFixPerFile := map[string][]result.Issue{} - for i := range issues { - issue := &issues[i] - if issue.Replacement == nil { - outIssues = append(outIssues, *issue) - continue - } + p.log.Infof("Applying suggested fixes") - issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) - } - - for file, issuesToFix := range issuesToFixPerFile { - err := p.sw.TrackStageErr("all", func() error { - return p.fixIssuesInFile(file, issuesToFix) - }) - if err != nil { - p.log.Errorf("Failed to fix issues in file %s: %s", file, err) - - // show issues only if can't fix them - outIssues = append(outIssues, issuesToFix...) - } + notFixableIssues, err := timeutils.TrackStage(p.sw, "all", func() ([]result.Issue, error) { + return p.process(issues) + }) + if err != nil { + p.log.Warnf("Failed to fix issues: %v", err) } p.printStat() - return outIssues, nil + return notFixableIssues, nil } -func (Fixer) Finish() {} +//nolint:funlen,gocyclo // This function should not be split. +func (p Fixer) process(issues []result.Issue) ([]result.Issue, error) { + // filenames / linters / edits + editsByLinter := make(map[string]map[string][]diff.Edit) -func (p Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { - // TODO: don't read the whole file into memory: read line by line; - // can't just use bufio.scanner: it has a line length limit - origFileData, err := p.fileCache.GetFileBytes(filePath) - if err != nil { - return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err) - } + formatters := []string{gofumpt.Name, goimports.Name, gofmt.Name, gci.Name} - origFileLines := bytes.Split(origFileData, []byte("\n")) + var notFixableIssues []result.Issue - tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) + toBeFormattedFiles := make(map[string]struct{}) - tmpOutFile, err := os.Create(tmpFileName) - if err != nil { - return fmt.Errorf("failed to make file %s: %w", tmpFileName, err) - } - - // merge multiple issues per line into one issue - issuesPerLine := map[int][]result.Issue{} for i := range issues { - issue := &issues[i] - issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) - } + issue := issues[i] - issues = issues[:0] // reuse the same memory - for line, lineIssues := range issuesPerLine { - if mergedIssue := p.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil { - issues = append(issues, *mergedIssue) + if slices.Contains(formatters, issue.FromLinter) { + toBeFormattedFiles[issue.FilePath()] = struct{}{} + continue } - } - issues = p.findNotIntersectingIssues(issues) + if issue.SuggestedFixes == nil || skipNoTextEdit(&issue) { + notFixableIssues = append(notFixableIssues, issue) + continue + } - if err = p.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { - tmpOutFile.Close() - _ 
= robustio.RemoveAll(tmpOutFile.Name()) - return err + for _, sf := range issue.SuggestedFixes { + for _, edit := range sf.TextEdits { + start, end := edit.Pos, edit.End + if start > end { + return nil, fmt.Errorf("%q suggests invalid fix: pos (%v) > end (%v)", + issue.FromLinter, edit.Pos, edit.End) + } + + edit := diff.Edit{ + Start: int(start), + End: int(end), + New: string(edit.NewText), + } + + if _, ok := editsByLinter[issue.FilePath()]; !ok { + editsByLinter[issue.FilePath()] = make(map[string][]diff.Edit) + } + + editsByLinter[issue.FilePath()][issue.FromLinter] = append(editsByLinter[issue.FilePath()][issue.FromLinter], edit) + } + } } - tmpOutFile.Close() + // Validate and group the edits to each actual file. + editsByPath := make(map[string][]diff.Edit) + for path, linterToEdits := range editsByLinter { + excludedLinters := make(map[string]struct{}) - if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil { - _ = robustio.RemoveAll(tmpOutFile.Name()) - return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err) - } + linters := slices.Collect(maps.Keys(linterToEdits)) - return nil -} + // Does any linter create conflicting edits? + for _, linter := range linters { + edits := linterToEdits[linter] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := linter, edits[invalid-1], edits[invalid] + excludedLinters[name] = struct{}{} -func (p Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue { - origLine := origFileLines[lineNum-1] // lineNum is 1-based + err := diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) + // TODO(ldez) TUI? + p.log.Warnf("Changes related to %q are skipped for the file %q: %v", + name, path, err) + } + } - if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil { - return &lineIssues[0] - } + // Does any pair of different linters create edits that conflict? + for j := range linters { + for k := range linters[:j] { + x, y := linters[j], linters[k] + if x > y { + x, y = y, x + } - // check issues first - for ind := range lineIssues { - li := &lineIssues[ind] + _, foundX := excludedLinters[x] + _, foundY := excludedLinters[y] + if foundX || foundY { + continue + } - if li.LineRange != nil { - p.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) - return &lineIssues[0] - } + xedits, yedits := linterToEdits[x], linterToEdits[y] - inline := li.Replacement.Inline + combined := slices.Concat(xedits, yedits) - if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete { - p.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues) - return li + if _, invalid := validateEdits(combined); invalid > 0 { + excludedLinters[x] = struct{}{} + p.log.Warnf("Changes related to %q are skipped for the file %q due to conflicts with %q.", x, path, y) + } + } } - if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) { - p.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline) - return nil + var edits []diff.Edit + for linter := range linterToEdits { + if _, found := excludedLinters[linter]; !found { + edits = append(edits, linterToEdits[linter]...) + } } - } - return p.applyInlineFixes(lineIssues, origLine, lineNum) -} + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. 
+ } -func (p Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue { - sort.Slice(lineIssues, func(i, j int) bool { - return lineIssues[i].Replacement.Inline.StartCol < lineIssues[j].Replacement.Inline.StartCol - }) + var editError error - var newLineBuf bytes.Buffer - newLineBuf.Grow(len(origLine)) + var formattedFiles []string - //nolint:misspell // misspelling is intentional - // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + contents, err := p.fileCache.GetFileBytes(path) + if err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue + } - curOrigLinePos := 0 - for i := range lineIssues { - fix := lineIssues[i].Replacement.Inline - if fix.StartCol < curOrigLinePos { - p.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) - return nil + out, err := diff.ApplyBytes(contents, edits) + if err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue } - if curOrigLinePos != fix.StartCol { - newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol]) + // Try to format the file. + out = p.formatter.Format(path, out) + + if err := os.WriteFile(path, out, filePerm); err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue } - newLineBuf.WriteString(fix.NewString) - curOrigLinePos = fix.StartCol + fix.Length - } - if curOrigLinePos != len(origLine) { - newLineBuf.Write(origLine[curOrigLinePos:]) + + formattedFiles = append(formattedFiles, path) } - mergedIssue := lineIssues[0] // use text from the first issue (it's not really used) - mergedIssue.Replacement = &result.Replacement{ - NewLines: []string{newLineBuf.String()}, + for path := range toBeFormattedFiles { + // Skips files already formatted by the previous fix step. + if !slices.Contains(formattedFiles, path) { + content, err := p.fileCache.GetFileBytes(path) + if err != nil { + p.log.Warnf("Error reading file %s: %v", path, err) + continue + } + + out := p.formatter.Format(path, content) + + if err := os.WriteFile(path, out, filePerm); err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue + } + } } - return &mergedIssue + + return notFixableIssues, editError } -func (p Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { - sort.SliceStable(issues, func(i, j int) bool { - a, b := issues[i], issues[j] - return a.Line() < b.Line() - }) +func (Fixer) Finish() {} - var ret []result.Issue - var currentEnd int - for i := range issues { - issue := &issues[i] - rng := issue.GetLineRange() - if rng.From <= currentEnd { - p.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) - continue // skip intersecting issue +func (p Fixer) printStat() { + p.sw.PrintStages() +} + +func skipNoTextEdit(issue *result.Issue) bool { + var onlyMessage int + for _, sf := range issue.SuggestedFixes { + if len(sf.TextEdits) == 0 { + onlyMessage++ } - p.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) - ret = append(ret, *issue) - currentEnd = rng.To } - return ret + return len(issue.SuggestedFixes) == onlyMessage } -func (p Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error { - // issues aren't intersecting +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. 
Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +// +//nolint:gocritic // Copy of go/analysis/internal/checker/checker.go +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } - nextIssueIndex := 0 - for i := 0; i < len(origFileLines); i++ { - var outLine string - var nextIssue *result.Issue - if nextIssueIndex != len(issues) { - nextIssue = &issues[nextIssueIndex] - } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } - origFileLineNumber := i + 1 - if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From { - outLine = string(origFileLines[i]) - } else { - nextIssueIndex++ - rng := nextIssue.GetLineRange() - if rng.From > rng.To { - // Maybe better decision is to skip such issues, re-evaluate if regressed. - p.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)", - rng.From, rng.To, nextIssue.FromLinter, - ) - } - i += rng.To - rng.From - if nextIssue.Replacement.NeedOnlyDelete { - continue + diff.SortEdits(edits) + + unique := []diff.Edit{edits[0]} + + invalid := -1 + + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i } - outLine = strings.Join(nextIssue.Replacement.NewLines, "\n") } + } + return unique, invalid +} - if i < len(origFileLines)-1 { - outLine += "\n" - } - if _, err := tmpOutFile.WriteString(outLine); err != nil { - return fmt.Errorf("failed to write output line: %w", err) - } +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. 
+// Copy of go/analysis/internal/checker/checker.go +func diff3Conflict(path, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := os.ReadFile(path) + if err != nil { + return err } + oldlabel, old := "base", string(contents) - return nil -} + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines) + if err != nil { + return err + } -func (p Fixer) printStat() { - p.sw.PrintStages() + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go index 876fd3bd3..9f332705e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/identifier_marker.go @@ -9,146 +9,137 @@ import ( var _ Processor = (*IdentifierMarker)(nil) type replacePattern struct { - re string + exp *regexp.Regexp repl string } -type replaceRegexp struct { - re *regexp.Regexp - repl string -} - -var replacePatterns = []replacePattern{ - // unparam - {`^(\S+) - (\S+) is unused$`, "`${1}` - `${2}` is unused"}, - {`^(\S+) - (\S+) always receives (\S+) \((.*)\)$`, "`${1}` - `${2}` always receives `${3}` (`${4}`)"}, - {`^(\S+) - (\S+) always receives (.*)$`, "`${1}` - `${2}` always receives `${3}`"}, - {`^(\S+) - result (\S+) is always (\S+)`, "`${1}` - result `${2}` is always `${3}`"}, - - // interfacer - {`^(\S+) can be (\S+)$`, "`${1}` can be `${2}`"}, - - // govet - {`^printf: (\S+) arg list ends with redundant newline$`, "printf: `${1}` arg list ends with redundant newline"}, - {`^composites: (\S+) composite literal uses unkeyed fields$`, "composites: `${1}` composite literal uses unkeyed fields"}, - - // gosec - { - `^(\S+): Blacklisted import (\S+): weak cryptographic primitive$`, - "${1}: Blacklisted import `${2}`: weak cryptographic primitive", - }, - {`^TLS InsecureSkipVerify set true.$`, "TLS `InsecureSkipVerify` set true."}, - - // gosimple - {`should replace loop with (.*)$`, "should replace loop with `${1}`"}, - { - `should use a simple channel send/receive instead of select with a single case`, - "should use a simple channel send/receive instead of `select` with a single case", - }, - { - `should omit comparison to bool constant, can be simplified to (.+)$`, - "should omit comparison to bool constant, can be simplified to `${1}`", - }, - {`should write (.+) instead of (.+)$`, "should write `${1}` instead of `${2}`"}, - {`redundant return statement$`, "redundant `return` statement"}, - { - `should replace this if statement with an unconditional strings.TrimPrefix`, - "should replace this `if` statement with an unconditional `strings.TrimPrefix`", - }, - - // staticcheck - {`this value of (\S+) is never used$`, "this value of `${1}` is never used"}, - { - `should use time.Since instead of time.Now\(\).Sub$`, - "should use `time.Since` instead of `time.Now().Sub`", - }, - { - `should check returned error before deferring response.Close\(\)$`, - "should check returned error before deferring `response.Close()`", - }, - {`no value of type uint is less than 0$`, "no value of type `uint` is less than `0`"}, - - // unused - {`(func|const|field|type|var) (\S+) is unused$`, "${1} `${2}` is 
unused"}, - - // typecheck - {`^unknown field (\S+) in struct literal$`, "unknown field `${1}` in struct literal"}, - { - `^invalid operation: (\S+) \(variable of type (\S+)\) has no field or method (\S+)$`, - "invalid operation: `${1}` (variable of type `${2}`) has no field or method `${3}`", - }, - {`^undeclared name: (\S+)$`, "undeclared name: `${1}`"}, - { - `^cannot use addr \(variable of type (\S+)\) as (\S+) value in argument to (\S+)$`, - "cannot use addr (variable of type `${1}`) as `${2}` value in argument to `${3}`", - }, - {`^other declaration of (\S+)$`, "other declaration of `${1}`"}, - {`^(\S+) redeclared in this block$`, "`${1}` redeclared in this block"}, - - // golint - { - `^exported (type|method|function|var|const) (\S+) should have comment or be unexported$`, - "exported ${1} `${2}` should have comment or be unexported", - }, - { - `^comment on exported (type|method|function|var|const) (\S+) should be of the form "(\S+) ..."$`, - "comment on exported ${1} `${2}` should be of the form `${3} ...`", - }, - {`^should replace (.+) with (.+)$`, "should replace `${1}` with `${2}`"}, - { - `^if block ends with a return statement, so drop this else and outdent its block$`, - "`if` block ends with a `return` statement, so drop this `else` and outdent its block", - }, - { - `^(struct field|var|range var|const|type|(?:func|method|interface method) (?:parameter|result)) (\S+) should be (\S+)$`, - "${1} `${2}` should be `${3}`", - }, - { - `^don't use underscores in Go names; var (\S+) should be (\S+)$`, - "don't use underscores in Go names; var `${1}` should be `${2}`", - }, -} - +// IdentifierMarker modifies report text. +// It must be before [Exclude] and [ExcludeRules]: +// users configure exclusions based on the modified text. type IdentifierMarker struct { - replaceRegexps []replaceRegexp + patterns map[string][]replacePattern } func NewIdentifierMarker() *IdentifierMarker { - var replaceRegexps []replaceRegexp - for _, p := range replacePatterns { - r := replaceRegexp{ - re: regexp.MustCompile(p.re), - repl: p.repl, - } - replaceRegexps = append(replaceRegexps, r) - } - return &IdentifierMarker{ - replaceRegexps: replaceRegexps, + patterns: map[string][]replacePattern{ + "unparam": { + { + exp: regexp.MustCompile(`^(\S+) - (\S+) is unused$`), + repl: "`${1}` - `${2}` is unused", + }, + { + exp: regexp.MustCompile(`^(\S+) - (\S+) always receives (\S+) \((.*)\)$`), + repl: "`${1}` - `${2}` always receives `${3}` (`${4}`)", + }, + { + exp: regexp.MustCompile(`^(\S+) - (\S+) always receives (.*)$`), + repl: "`${1}` - `${2}` always receives `${3}`", + }, + { + exp: regexp.MustCompile(`^(\S+) - result (\S+) is always (\S+)`), + repl: "`${1}` - result `${2}` is always `${3}`", + }, + }, + "govet": { + { + // printf + exp: regexp.MustCompile(`^printf: (\S+) arg list ends with redundant newline$`), + repl: "printf: `${1}` arg list ends with redundant newline", + }, + }, + "gosec": { + { + exp: regexp.MustCompile(`^TLS InsecureSkipVerify set true.$`), + repl: "TLS `InsecureSkipVerify` set true.", + }, + }, + "gosimple": { + { + // s1011 + exp: regexp.MustCompile(`should replace loop with (.*)$`), + repl: "should replace loop with `${1}`", + }, + { + // s1000 + exp: regexp.MustCompile(`should use a simple channel send/receive instead of select with a single case`), + repl: "should use a simple channel send/receive instead of `select` with a single case", + }, + { + // s1002 + exp: regexp.MustCompile(`should omit comparison to bool constant, can be simplified to (.+)$`), + repl: "should 
omit comparison to bool constant, can be simplified to `${1}`", + }, + { + // s1023 + exp: regexp.MustCompile(`redundant return statement$`), + repl: "redundant `return` statement", + }, + { + // s1017 + exp: regexp.MustCompile(`should replace this if statement with an unconditional strings.TrimPrefix`), + repl: "should replace this `if` statement with an unconditional `strings.TrimPrefix`", + }, + }, + "staticcheck": { + { + // sa4006 + exp: regexp.MustCompile(`this value of (\S+) is never used$`), + repl: "this value of `${1}` is never used", + }, + { + // s1012 + exp: regexp.MustCompile(`should use time.Since instead of time.Now\(\).Sub$`), + repl: "should use `time.Since` instead of `time.Now().Sub`", + }, + { + // sa5001 + exp: regexp.MustCompile(`should check returned error before deferring response.Close\(\)$`), + repl: "should check returned error before deferring `response.Close()`", + }, + { + // sa4003 + exp: regexp.MustCompile(`no value of type uint is less than 0$`), + repl: "no value of type `uint` is less than `0`", + }, + }, + "unused": { + { + exp: regexp.MustCompile(`(func|const|field|type|var) (\S+) is unused$`), + repl: "${1} `${2}` is unused", + }, + }, + }, } } -func (IdentifierMarker) Name() string { +func (*IdentifierMarker) Name() string { return "identifier_marker" } -func (p IdentifierMarker) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *IdentifierMarker) Process(issues []result.Issue) ([]result.Issue, error) { return transformIssues(issues, func(issue *result.Issue) *result.Issue { + re, ok := p.patterns[issue.FromLinter] + if !ok { + return issue + } + newIssue := *issue - newIssue.Text = p.markIdentifiers(newIssue.Text) + newIssue.Text = markIdentifiers(re, newIssue.Text) + return &newIssue }), nil } -func (IdentifierMarker) Finish() {} +func (*IdentifierMarker) Finish() {} -func (p IdentifierMarker) markIdentifiers(s string) string { - for _, rr := range p.replaceRegexps { - rs := rr.re.ReplaceAllString(s, rr.repl) - if rs != s { +func markIdentifiers(re []replacePattern, text string) string { + for _, rr := range re { + rs := rr.exp.ReplaceAllString(text, rr.repl) + if rs != text { return rs } } - return s + return text } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go index 3f6cfc540..042675b59 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/invalid_issue.go @@ -9,6 +9,9 @@ import ( var _ Processor = (*InvalidIssue)(nil) +// InvalidIssue filters invalid reports. +// - non-go files (except `go.mod`) +// - reports without file path type InvalidIssue struct { log logutils.Log } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go index 0680c3f29..ced200af7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -8,6 +8,7 @@ import ( var _ Processor = (*MaxFromLinter)(nil) +// MaxFromLinter limits the number of reports from the same linter. 
type MaxFromLinter struct { linterCounter map[string]int limit int @@ -34,11 +35,6 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { - // we need to fix all issues at once => we need to return all of them - return true - } - p.linterCounter[issue.FromLinter]++ // always inc for stat return p.linterCounter[issue.FromLinter] <= p.limit diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go index a39c98473..7c59b5dd6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go @@ -7,6 +7,7 @@ import ( var _ Processor = (*MaxPerFileFromLinter)(nil) +// MaxPerFileFromLinter limits the number of reports by file and by linter. type MaxPerFileFromLinter struct { fileLinterCounter fileLinterCounter maxPerFileFromLinterConfig map[string]int @@ -20,6 +21,7 @@ func NewMaxPerFileFromLinter(cfg *config.Config) *MaxPerFileFromLinter { // otherwise we need to fix all issues in the file at once maxPerFileFromLinterConfig["gofmt"] = 1 maxPerFileFromLinterConfig["goimports"] = 1 + maxPerFileFromLinterConfig["gci"] = 1 } return &MaxPerFileFromLinter{ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go index 1647cace0..349f6a9af 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -10,6 +10,7 @@ import ( var _ Processor = (*MaxSameIssues)(nil) +// MaxSameIssues limits the number of reports with the same text. 
type MaxSameIssues struct { textCounter map[string]int limit int @@ -36,12 +37,8 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { - // we need to fix all issues at once => we need to return all of them - return true - } - p.textCounter[issue.Text]++ // always inc for stat + return p.textCounter[issue.Text] <= p.limit }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint_filter.go similarity index 86% rename from vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go rename to vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint_filter.go index 7794bd3ec..99cd79995 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint_filter.go @@ -4,12 +4,12 @@ import ( "go/ast" "go/parser" "go/token" + "maps" "regexp" + "slices" "sort" "strings" - "golang.org/x/exp/maps" - "github.com/golangci/golangci-lint/pkg/golinters/nolintlint" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -17,9 +17,9 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -var _ Processor = (*Nolint)(nil) +var _ Processor = (*NolintFilter)(nil) -var nolintDebugf = logutils.Debug(logutils.DebugKeyNolint) +var nolintDebugf = logutils.Debug(logutils.DebugKeyNolintFilter) type ignoredRange struct { linters []string @@ -64,7 +64,8 @@ type fileData struct { ignoredRanges []ignoredRange } -type Nolint struct { +// NolintFilter filters and sorts reports related to `nolint` directives. 
+type NolintFilter struct { fileCache map[string]*fileData dbManager *lintersdb.Manager enabledLinters map[string]*linter.Config @@ -75,8 +76,8 @@ type Nolint struct { pattern *regexp.Regexp } -func NewNolint(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters map[string]*linter.Config) *Nolint { - return &Nolint{ +func NewNolintFilter(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters map[string]*linter.Config) *NolintFilter { + return &NolintFilter{ fileCache: map[string]*fileData{}, dbManager: dbManager, enabledLinters: enabledLinters, @@ -86,28 +87,27 @@ func NewNolint(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters ma } } -func (*Nolint) Name() string { - return "nolint" +func (*NolintFilter) Name() string { + return "nolint_filter" } -func (p *Nolint) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *NolintFilter) Process(issues []result.Issue) ([]result.Issue, error) { // put nolintlint issues last because we process other issues first to determine which nolint directives are unused sort.Stable(sortWithNolintlintLast(issues)) return filterIssuesErr(issues, p.shouldPassIssue) } -func (p *Nolint) Finish() { +func (p *NolintFilter) Finish() { if len(p.unknownLintersSet) == 0 { return } - unknownLinters := maps.Keys(p.unknownLintersSet) - sort.Strings(unknownLinters) + unknownLinters := slices.Sorted(maps.Keys(p.unknownLintersSet)) p.log.Warnf("Found unknown linters in //nolint directives: %s", strings.Join(unknownLinters, ", ")) } -func (p *Nolint) shouldPassIssue(issue *result.Issue) (bool, error) { +func (p *NolintFilter) shouldPassIssue(issue *result.Issue) (bool, error) { nolintDebugf("got issue: %v", *issue) // don't expect disabled linters to cover their nolint statements @@ -142,7 +142,7 @@ func (p *Nolint) shouldPassIssue(issue *result.Issue) (bool, error) { return true, nil } -func (p *Nolint) getOrCreateFileData(issue *result.Issue) *fileData { +func (p *NolintFilter) getOrCreateFileData(issue *result.Issue) *fileData { fd := p.fileCache[issue.FilePath()] if fd != nil { return fd @@ -169,7 +169,7 @@ func (p *Nolint) getOrCreateFileData(issue *result.Issue) *fileData { return fd } -func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, filePath string) []ignoredRange { +func (p *NolintFilter) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, filePath string) []ignoredRange { inlineRanges := p.extractFileCommentsInlineRanges(fset, f.Comments...) nolintDebugf("file %s: inline nolint ranges are %+v", filePath, inlineRanges) @@ -191,7 +191,7 @@ func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, fil return allRanges } -func (p *Nolint) extractFileCommentsInlineRanges(fset *token.FileSet, comments ...*ast.CommentGroup) []ignoredRange { +func (p *NolintFilter) extractFileCommentsInlineRanges(fset *token.FileSet, comments ...*ast.CommentGroup) []ignoredRange { var ret []ignoredRange for _, g := range comments { for _, c := range g.List { @@ -205,7 +205,7 @@ func (p *Nolint) extractFileCommentsInlineRanges(fset *token.FileSet, comments . 
return ret } -func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *token.FileSet) *ignoredRange { +func (p *NolintFilter) extractInlineRangeFromComment(text string, g ast.Node, fset *token.FileSet) *ignoredRange { text = strings.TrimLeft(text, "/ ") if !p.pattern.MatchString(text) { return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_absoluter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_absoluter.go new file mode 100644 index 000000000..a649716d5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_absoluter.go @@ -0,0 +1,44 @@ +package processors + +import ( + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var _ Processor = (*PathAbsoluter)(nil) + +// PathAbsoluter ensures that representation of path are absolute. +type PathAbsoluter struct { + log logutils.Log +} + +func NewPathAbsoluter(log logutils.Log) *PathAbsoluter { + return &PathAbsoluter{log: log.Child(logutils.DebugKeyPathAbsoluter)} +} + +func (*PathAbsoluter) Name() string { + return "path_absoluter" +} + +func (p *PathAbsoluter) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + if filepath.IsAbs(issue.FilePath()) { + return issue + } + + absPath, err := filepath.Abs(issue.FilePath()) + if err != nil { + p.log.Warnf("failed to get absolute path for %q: %v", issue.FilePath(), err) + return nil + } + + newIssue := issue + newIssue.Pos.Filename = absPath + + return newIssue + }), nil +} + +func (*PathAbsoluter) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go deleted file mode 100644 index 8036e3fd6..000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go +++ /dev/null @@ -1,36 +0,0 @@ -package processors - -import ( - "github.com/golangci/golangci-lint/pkg/fsutils" - "github.com/golangci/golangci-lint/pkg/result" -) - -var _ Processor = (*PathPrefixer)(nil) - -// PathPrefixer adds a customizable prefix to every output path -type PathPrefixer struct { - prefix string -} - -// NewPathPrefixer returns a new path prefixer for the provided string -func NewPathPrefixer(prefix string) *PathPrefixer { - return &PathPrefixer{prefix: prefix} -} - -// Name returns the name of this processor -func (*PathPrefixer) Name() string { - return "path_prefixer" -} - -// Process adds the prefix to each path -func (p *PathPrefixer) Process(issues []result.Issue) ([]result.Issue, error) { - if p.prefix != "" { - for i := range issues { - issues[i].Pos.Filename = fsutils.WithPathPrefix(p.prefix, issues[i].Pos.Filename) - } - } - return issues, nil -} - -// Finish is implemented to satisfy the Processor interface -func (*PathPrefixer) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go index c5c27357c..6a04b1c35 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prettifier.go @@ -1,40 +1,39 @@ package processors import ( - "path/filepath" - "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" 
"github.com/golangci/golangci-lint/pkg/result" ) var _ Processor = (*PathPrettifier)(nil) +// PathPrettifier modifies report file path to be relative to the base path. +// Also handles the `output.path-prefix` option. type PathPrettifier struct { + prefix string + log logutils.Log } -func NewPathPrettifier() *PathPrettifier { - return &PathPrettifier{} +func NewPathPrettifier(log logutils.Log, prefix string) *PathPrettifier { + return &PathPrettifier{ + prefix: prefix, + log: log.Child(logutils.DebugKeyPathPrettifier), + } } -func (PathPrettifier) Name() string { +func (*PathPrettifier) Name() string { return "path_prettifier" } -func (PathPrettifier) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *PathPrettifier) Process(issues []result.Issue) ([]result.Issue, error) { return transformIssues(issues, func(issue *result.Issue) *result.Issue { - if !filepath.IsAbs(issue.FilePath()) { - return issue - } + newIssue := issue - rel, err := fsutils.ShortestRelPath(issue.FilePath(), "") - if err != nil { - return issue - } + newIssue.Pos.Filename = fsutils.WithPathPrefix(p.prefix, issue.RelativePath) - newIssue := issue - newIssue.Pos.Filename = rel return newIssue }), nil } -func (PathPrettifier) Finish() {} +func (*PathPrettifier) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_relativity.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_relativity.go new file mode 100644 index 000000000..bdaf1b98d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_relativity.go @@ -0,0 +1,60 @@ +package processors + +import ( + "fmt" + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/logutils" + "github.com/golangci/golangci-lint/pkg/result" +) + +var _ Processor = (*PathRelativity)(nil) + +// PathRelativity computes [result.Issue.RelativePath] and [result.Issue.WorkingDirectoryRelativePath], +// based on the base path. 
+type PathRelativity struct { + log logutils.Log + basePath string + workingDirectory string +} + +func NewPathRelativity(log logutils.Log, basePath string) (*PathRelativity, error) { + wd, err := fsutils.Getwd() + if err != nil { + return nil, fmt.Errorf("error getting working directory: %w", err) + } + + return &PathRelativity{ + log: log.Child(logutils.DebugKeyPathRelativity), + basePath: basePath, + workingDirectory: wd, + }, nil +} + +func (*PathRelativity) Name() string { + return "path_relativity" +} + +func (p *PathRelativity) Process(issues []result.Issue) ([]result.Issue, error) { + return transformIssues(issues, func(issue *result.Issue) *result.Issue { + newIssue := *issue + + var err error + newIssue.RelativePath, err = filepath.Rel(p.basePath, issue.FilePath()) + if err != nil { + p.log.Warnf("Getting relative path (basepath): %v", err) + return nil + } + + newIssue.WorkingDirectoryRelativePath, err = filepath.Rel(p.workingDirectory, issue.FilePath()) + if err != nil { + p.log.Warnf("Getting relative path (wd): %v", err) + return nil + } + + return &newIssue + }), nil +} + +func (*PathRelativity) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go index b161e86c2..0c0288269 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go @@ -10,6 +10,8 @@ import ( var _ Processor = (*PathShortener)(nil) +// PathShortener modifies text of the reports to reduce file path inside the text. +// It uses the rooted path name corresponding to the current directory (`wd`). type PathShortener struct { wd string } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go index 93a26586d..2dacf6638 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go @@ -1,7 +1,7 @@ package processors import ( - "regexp" + "cmp" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" @@ -13,11 +13,10 @@ const severityFromLinter = "@linter" var _ Processor = (*Severity)(nil) -type severityRule struct { - baseRule - severity string -} - +// Severity modifies report severity. +// It uses the same `baseRule` structure as [ExcludeRules] processor. +// +// Warning: it doesn't use `path-prefix` option. 
type Severity struct { name string @@ -43,7 +42,7 @@ func NewSeverity(log logutils.Log, files *fsutils.Files, cfg *config.Severity) * p.name = "severity-rules-case-sensitive" } - p.rules = createSeverityRules(cfg.Rules, prefix) + p.rules = parseRules(cfg.Rules, prefix, newSeverityRule) return p } @@ -67,10 +66,7 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue { return issue } - issue.Severity = rule.severity - if issue.Severity == "" { - issue.Severity = p.defaultSeverity - } + issue.Severity = cmp.Or(rule.severity, p.defaultSeverity) return issue } @@ -83,34 +79,14 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue { return issue } -func createSeverityRules(rules []config.SeverityRule, prefix string) []severityRule { - parsedRules := make([]severityRule, 0, len(rules)) - - for _, rule := range rules { - parsedRule := severityRule{} - parsedRule.linters = rule.Linters - parsedRule.severity = rule.Severity - - if rule.Text != "" { - parsedRule.text = regexp.MustCompile(prefix + rule.Text) - } - - if rule.Source != "" { - parsedRule.source = regexp.MustCompile(prefix + rule.Source) - } - - if rule.Path != "" { - path := fsutils.NormalizePathInRegex(rule.Path) - parsedRule.path = regexp.MustCompile(path) - } - - if rule.PathExcept != "" { - pathExcept := fsutils.NormalizePathInRegex(rule.PathExcept) - parsedRule.pathExcept = regexp.MustCompile(pathExcept) - } +type severityRule struct { + baseRule + severity string +} - parsedRules = append(parsedRules, parsedRule) +func newSeverityRule(rule *config.SeverityRule, prefix string) severityRule { + return severityRule{ + baseRule: newBaseRule(&rule.BaseRule, prefix), + severity: rule.Severity, } - - return parsedRules } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go index 39dbfd1d3..8900c96a9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go @@ -26,23 +26,30 @@ type skipStat struct { count int } +// SkipDirs filters reports based on directory names. +// It uses the shortest relative paths and `path-prefix` option. +// TODO(ldez): should be removed in v2. 
type SkipDirs struct { - patterns []*regexp.Regexp - log logutils.Log + patterns []*regexp.Regexp + pathPrefix string + + log logutils.Log + skippedDirs map[string]*skipStat absArgsDirs []string skippedDirsCache map[string]bool - pathPrefix string } func NewSkipDirs(log logutils.Log, patterns, args []string, pathPrefix string) (*SkipDirs, error) { var patternsRe []*regexp.Regexp for _, p := range patterns { p = fsutils.NormalizePathInRegex(p) + patternRe, err := regexp.Compile(p) if err != nil { return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) } + patternsRe = append(patternsRe, patternRe) } @@ -53,11 +60,11 @@ func NewSkipDirs(log logutils.Log, patterns, args []string, pathPrefix string) ( return &SkipDirs{ patterns: patternsRe, + pathPrefix: pathPrefix, log: log, skippedDirs: map[string]*skipStat{}, absArgsDirs: absArgsDirs, skippedDirsCache: map[string]bool{}, - pathPrefix: pathPrefix, }, nil } @@ -80,30 +87,27 @@ func (p *SkipDirs) Finish() { } func (p *SkipDirs) shouldPassIssue(issue *result.Issue) bool { - if filepath.IsAbs(issue.FilePath()) { - if isGoFile(issue.FilePath()) { - p.log.Warnf("Got abs path %s in skip dirs processor, it should be relative", issue.FilePath()) - } - return true - } - - issueRelDir := filepath.Dir(issue.FilePath()) + issueRelDir := filepath.Dir(issue.RelativePath) if toPass, ok := p.skippedDirsCache[issueRelDir]; ok { if !toPass { p.skippedDirs[issueRelDir].count++ } + return toPass } issueAbsDir, err := filepath.Abs(issueRelDir) if err != nil { p.log.Warnf("Can't abs-ify path %q: %s", issueRelDir, err) + return true } toPass := p.shouldPassIssueDirs(issueRelDir, issueAbsDir) + p.skippedDirsCache[issueRelDir] = toPass + return toPass } @@ -123,15 +127,19 @@ func (p *SkipDirs) shouldPassIssueDirs(issueRelDir, issueAbsDir string) bool { // disadvantages (https://github.com/golangci/golangci-lint/pull/313). path := fsutils.WithPathPrefix(p.pathPrefix, issueRelDir) + for _, pattern := range p.patterns { if pattern.MatchString(path) { ps := pattern.String() + if p.skippedDirs[issueRelDir] == nil { p.skippedDirs[issueRelDir] = &skipStat{ pattern: ps, } } + p.skippedDirs[issueRelDir].count++ + return false } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go index 3b17a9f32..5907cf677 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go @@ -10,6 +10,10 @@ import ( var _ Processor = (*SkipFiles)(nil) +// SkipFiles filters reports based on filename. +// +// It uses the shortest relative paths and `path-prefix` option. +// TODO(ldez): should be removed in v2. 
type SkipFiles struct { patterns []*regexp.Regexp pathPrefix string @@ -34,26 +38,28 @@ func NewSkipFiles(patterns []string, pathPrefix string) (*SkipFiles, error) { }, nil } -func (SkipFiles) Name() string { +func (*SkipFiles) Name() string { return "skip_files" } -func (p SkipFiles) Process(issues []result.Issue) ([]result.Issue, error) { +func (p *SkipFiles) Process(issues []result.Issue) ([]result.Issue, error) { if len(p.patterns) == 0 { return issues, nil } - return filterIssues(issues, func(issue *result.Issue) bool { - path := fsutils.WithPathPrefix(p.pathPrefix, issue.FilePath()) + return filterIssues(issues, p.shouldPassIssue), nil +} + +func (*SkipFiles) Finish() {} + +func (p *SkipFiles) shouldPassIssue(issue *result.Issue) bool { + path := fsutils.WithPathPrefix(p.pathPrefix, issue.RelativePath) - for _, pattern := range p.patterns { - if pattern.MatchString(path) { - return false - } + for _, pattern := range p.patterns { + if pattern.MatchString(path) { + return false } + } - return true - }), nil + return true } - -func (SkipFiles) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go index 7eebea631..96f5574f6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go @@ -10,11 +10,6 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -// Base propose of this functionality to sort results (issues) -// produced by various linters by analyzing code. We're achieving this -// by sorting results.Issues using processor step, and chain based -// rules that can compare different properties of the Issues struct. - const ( orderNameFile = "file" orderNameLinter = "linter" @@ -31,13 +26,17 @@ var _ Processor = (*SortResults)(nil) type issueComparator func(a, b *result.Issue) int +// SortResults sorts reports based on criteria: +// - file names, line numbers, positions +// - linter names +// - severity names type SortResults struct { cmps map[string][]issueComparator cfg *config.Output } -func NewSortResults(cfg *config.Config) *SortResults { +func NewSortResults(cfg *config.Output) *SortResults { return &SortResults{ cmps: map[string][]issueComparator{ // For sorting we are comparing (in next order): @@ -48,7 +47,7 @@ func NewSortResults(cfg *config.Config) *SortResults { // For sorting we are comparing: severity orderNameSeverity: {bySeverity}, }, - cfg: &cfg.Output, + cfg: cfg, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go index 4a89fc73e..3f20b2f56 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/source_code.go @@ -8,6 +8,13 @@ import ( var _ Processor = (*SourceCode)(nil) +// SourceCode modifies displayed information based on [result.Issue.GetLineRange()]. +// +// This is used: +// - to display the "UnderLinePointer". +// - in some rare cases to display multiple lines instead of one (ex: `dupl`) +// +// It requires to use [fsutils.LineCache] ([fsutils.FileCache]) to get the file information before the fixes. 
type SourceCode struct { lineCache *fsutils.LineCache log logutils.Log diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go index 115196d9a..729842122 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -1,7 +1,6 @@ package processors import ( - "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/result" ) @@ -9,15 +8,16 @@ const uniqByLineLimit = 1 var _ Processor = (*UniqByLine)(nil) +// UniqByLine filters reports to keep only one report by line of code. type UniqByLine struct { fileLineCounter fileLineCounter - cfg *config.Config + enabled bool } -func NewUniqByLine(cfg *config.Config) *UniqByLine { +func NewUniqByLine(enable bool) *UniqByLine { return &UniqByLine{ fileLineCounter: fileLineCounter{}, - cfg: cfg, + enabled: enable, } } @@ -26,7 +26,7 @@ func (*UniqByLine) Name() string { } func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { - if !p.cfg.Output.UniqByLine { + if !p.enabled { return issues, nil } @@ -36,12 +36,6 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { func (*UniqByLine) Finish() {} func (p *UniqByLine) shouldPassIssue(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { - // if issue will be auto-fixed we shouldn't collapse issues: - // e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them. - return true - } - if p.fileLineCounter.GetCount(issue) == uniqByLineLimit { return false } diff --git a/vendor/github.com/golangci/modinfo/.gitignore b/vendor/github.com/golangci/modinfo/.gitignore deleted file mode 100644 index 9f11b755a..000000000 --- a/vendor/github.com/golangci/modinfo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea/ diff --git a/vendor/github.com/golangci/modinfo/.golangci.yml b/vendor/github.com/golangci/modinfo/.golangci.yml deleted file mode 100644 index 9698182f2..000000000 --- a/vendor/github.com/golangci/modinfo/.golangci.yml +++ /dev/null @@ -1,157 +0,0 @@ -run: - timeout: 7m - -linters-settings: - govet: - enable: - - shadow - gocyclo: - min-complexity: 12 - goconst: - min-len: 3 - min-occurrences: 3 - funlen: - lines: -1 - statements: 50 - misspell: - locale: US - depguard: - rules: - main: - deny: - - pkg: "github.com/instana/testify" - desc: not allowed - - pkg: "github.com/pkg/errors" - desc: Should be replaced by standard lib errors package - tagalign: - align: false - order: - - xml - - json - - yaml - - yml - - toml - - mapstructure - - url - godox: - keywords: - - FIXME - gocritic: - enabled-tags: - - diagnostic - - style - - performance - disabled-checks: - - paramTypeCombine # already handle by gofumpt.extra-rules - - whyNoLint # already handle by nonolint - - unnamedResult - - hugeParam - - sloppyReassign - - rangeValCopy - - octalLiteral - - ptrToRefParam - - appendAssign - - ruleguard - - httpNoBody - - exposedSyncMutex - revive: - rules: - - name: struct-tag - - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - disabled: true - - name: if-return - - name: increment-decrement - - name: var-naming - - name: var-declaration - - name: package-comments - disabled: true - - name: range - - 
name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - disabled: true - - name: unreachable-code - - name: redefines-builtin-id - - tagliatelle: - case: - rules: - json: pascal - yaml: camel - xml: camel - header: header - mapstructure: camel - env: upperSnake - envconfig: upperSnake - -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - cyclop # duplicate of gocyclo - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - lll - - gosec - - dupl # not relevant - - prealloc # too many false-positive - - bodyclose # too many false-positive - - gomnd - - testpackage # not relevant - - tparallel # not relevant - - paralleltest # not relevant - - nestif # too many false-positive - - wrapcheck - - goerr113 # not relevant - - nlreturn # not relevant - - wsl # not relevant - - exhaustive # not relevant - - exhaustruct # not relevant - - makezero # not relevant - - forbidigo - - varnamelen # not relevant - - nilnil # not relevant - - ireturn # not relevant - - contextcheck # too many false-positive - - tenv # we already have a test "framework" to handle env vars - - noctx - - errchkjson - - nonamedreturns - - gosmopolitan # not relevant - - gochecknoglobals - -issues: - exclude-use-default: false - max-issues-per-linter: 0 - max-same-issues: 0 - exclude: - - 'Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' - - 'ST1000: at least one file in a package should have a package comment' - exclude-rules: - - path: (.+)_test.go - linters: - - funlen - - goconst - - maintidx diff --git a/vendor/github.com/golangci/modinfo/LICENSE b/vendor/github.com/golangci/modinfo/LICENSE deleted file mode 100644 index f288702d2..000000000 --- a/vendor/github.com/golangci/modinfo/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. 
- - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. 
- - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. 
- - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. 
- - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. 
- - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. 
(Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/vendor/github.com/golangci/modinfo/Makefile b/vendor/github.com/golangci/modinfo/Makefile deleted file mode 100644 index df91018f1..000000000 --- a/vendor/github.com/golangci/modinfo/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -.PHONY: clean check test - -default: clean check test - -clean: - rm -rf dist/ cover.out - -test: clean - go test -v -cover ./... 
- -check: - golangci-lint run diff --git a/vendor/github.com/golangci/modinfo/module.go b/vendor/github.com/golangci/modinfo/module.go deleted file mode 100644 index ff0b21b9b..000000000 --- a/vendor/github.com/golangci/modinfo/module.go +++ /dev/null @@ -1,157 +0,0 @@ -package modinfo - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "reflect" - "sort" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/go/analysis" -) - -type ModInfo struct { - Path string `json:"Path"` - Dir string `json:"Dir"` - GoMod string `json:"GoMod"` - GoVersion string `json:"GoVersion"` - Main bool `json:"Main"` -} - -var ( - once sync.Once - information []ModInfo - errInfo error -) - -var Analyzer = &analysis.Analyzer{ - Name: "modinfo", - Doc: "Module information", - URL: "https://github.com/golangci/modinfo", - Run: runOnce, - ResultType: reflect.TypeOf([]ModInfo(nil)), -} - -func runOnce(pass *analysis.Pass) (any, error) { - _, ok := os.LookupEnv("MODINFO_DEBUG_DISABLE_ONCE") - if ok { - return GetModuleInfo(pass) - } - - once.Do(func() { - information, errInfo = GetModuleInfo(pass) - }) - - return information, errInfo -} - -// GetModuleInfo gets modules information. -// Always returns 1 element except for workspace (returns all the modules of the workspace). -// Based on `go list -m -json` behavior. -func GetModuleInfo(pass *analysis.Pass) ([]ModInfo, error) { - // https://github.com/golang/go/issues/44753#issuecomment-790089020 - cmd := exec.Command("go", "list", "-m", "-json") - for _, file := range pass.Files { - name := pass.Fset.File(file.Pos()).Name() - if filepath.Ext(name) != ".go" { - continue - } - - cmd.Dir = filepath.Dir(name) - break - } - - out, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("command go list: %w: %s", err, string(out)) - } - - var infos []ModInfo - - for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); { - var v ModInfo - if err := dec.Decode(&v); err != nil { - return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out)) - } - - if v.GoMod == "" { - return nil, errors.New("working directory is not part of a module") - } - - if !v.Main || v.Dir == "" { - continue - } - - infos = append(infos, v) - } - - if len(infos) == 0 { - return nil, errors.New("go.mod file not found") - } - - sort.Slice(infos, func(i, j int) bool { - return len(infos[i].Path) > len(infos[j].Path) - }) - - return infos, nil -} - -// FindModuleFromPass finds the module related to the files of the pass. -func FindModuleFromPass(pass *analysis.Pass) (ModInfo, error) { - infos, ok := pass.ResultOf[Analyzer].([]ModInfo) - if !ok { - return ModInfo{}, errors.New("no modinfo analyzer result") - } - - var name string - for _, file := range pass.Files { - f := pass.Fset.File(file.Pos()).Name() - if filepath.Ext(f) != ".go" { - continue - } - - name = f - break - } - - // no Go file found in analysis pass - if name == "" { - name, _ = os.Getwd() - } - - for _, info := range infos { - if !strings.HasPrefix(name, info.Dir) { - continue - } - return info, nil - } - - return ModInfo{}, errors.New("module information not found") -} - -// ReadModuleFileFromPass read the `go.mod` file from the pass result. -func ReadModuleFileFromPass(pass *analysis.Pass) (*modfile.File, error) { - info, err := FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - return ReadModuleFile(info) -} - -// ReadModuleFile read the `go.mod` file. 
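The removed modinfo helper above relies on one reusable technique: `go list -m -json` prints a stream of JSON objects (one per module, several in a workspace), which is consumed with a `json.Decoder` loop rather than a single `Unmarshal`. A minimal sketch of that technique follows; the struct is trimmed to a few fields and the helper name is illustrative.

```go
// Sketch: decode the JSON object stream emitted by `go list -m -json`.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

type modInfo struct {
	Path  string `json:"Path"`
	Dir   string `json:"Dir"`
	GoMod string `json:"GoMod"`
	Main  bool   `json:"Main"`
}

func listModules() ([]modInfo, error) {
	out, err := exec.Command("go", "list", "-m", "-json").Output()
	if err != nil {
		return nil, fmt.Errorf("go list -m -json: %w", err)
	}

	var infos []modInfo
	// One JSON object per module; decode until the stream is exhausted.
	dec := json.NewDecoder(bytes.NewReader(out))
	for dec.More() {
		var v modInfo
		if err := dec.Decode(&v); err != nil {
			return nil, fmt.Errorf("decode module info: %w", err)
		}
		infos = append(infos, v)
	}
	return infos, nil
}

func main() {
	infos, err := listModules()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, info := range infos {
		fmt.Println(info.Path, "=>", info.Dir)
	}
}
```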
-func ReadModuleFile(info ModInfo) (*modfile.File, error) { - raw, err := os.ReadFile(info.GoMod) - if err != nil { - return nil, fmt.Errorf("reading go.mod file: %w", err) - } - - return modfile.Parse("go.mod", raw, nil) -} diff --git a/vendor/github.com/golangci/modinfo/readme.md b/vendor/github.com/golangci/modinfo/readme.md deleted file mode 100644 index 2175de8eb..000000000 --- a/vendor/github.com/golangci/modinfo/readme.md +++ /dev/null @@ -1,73 +0,0 @@ -# modinfo - -This module contains: -- an analyzer that returns module information. -- methods to find and read `go.mod` file - -## Examples - -```go -package main - -import ( - "fmt" - - "github.com/golangci/modinfo" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" -) - -var Analyzer = &analysis.Analyzer{ - Name: "example", - Doc: "Example", - Run: func(pass *analysis.Pass) (interface{}, error) { - file, err := modinfo.ReadModuleFileFromPass(pass) - if err != nil { - return nil, err - } - - fmt.Println("go.mod", file) - - // TODO - - return nil, nil - }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - modinfo.Analyzer, - }, -} -``` - -```go -package main - -import ( - "fmt" - - "github.com/golangci/modinfo" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" -) - -var Analyzer = &analysis.Analyzer{ - Name: "example", - Doc: "Example", - Run: func(pass *analysis.Pass) (interface{}, error) { - info, err := modinfo.FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - fmt.Println("Module", info.Dir) - - // TODO - - return nil, nil - }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - modinfo.Analyzer, - }, -} -``` diff --git a/vendor/github.com/golangci/revgrep/.golangci.yml b/vendor/github.com/golangci/revgrep/.golangci.yml index 5239720ac..f08807b12 100644 --- a/vendor/github.com/golangci/revgrep/.golangci.yml +++ b/vendor/github.com/golangci/revgrep/.golangci.yml @@ -1,5 +1,28 @@ -run: - timeout: 2m +linters: + enable-all: true + disable: + - exportloopref # deprecated + - cyclop # duplicate of gocyclo + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - dupl + - lll + - nestif + - mnd + - err113 + - nlreturn + - wsl + - exhaustive + - exhaustruct + - tparallel + - testpackage + - paralleltest + - forcetypeassert + - varnamelen + - prealloc # false-positives + - nonamedreturns + - nilerr + - depguard linters-settings: govet: @@ -23,44 +46,9 @@ linters-settings: godox: keywords: - FIXME - -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - cyclop # duplicate of gocyclo - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - dupl - - lll - - nestif - - gomnd - - goerr113 - - nlreturn - - wsl - - exhaustive - - exhaustruct - - tparallel - - testpackage - - paralleltest - - ifshort - - forcetypeassert - - varnamelen - - prealloc # false-positives - - nosnakecase - - nonamedreturns - - nilerr - - depguard + gosec: + excludes: + - G115 # integer overflow conversion issues: exclude-use-default: false @@ -78,3 +66,6 @@ issues: - path: cmd/revgrep/main.go linters: - forbidigo + +run: + timeout: 2m diff --git a/vendor/github.com/golangci/revgrep/README.md 
b/vendor/github.com/golangci/revgrep/README.md index 97f25ffb3..c776cb451 100644 --- a/vendor/github.com/golangci/revgrep/README.md +++ b/vendor/github.com/golangci/revgrep/README.md @@ -1,14 +1,14 @@ -# Overview +## Overview `revgrep` is a CLI tool used to filter static analysis tools to only lines changed based on a commit reference. -# Install +## Install ```bash -go get -u github.com/golangci/revgrep/... +go install github.com/golangci/revgrep/cmd/revgrep@latest ``` -# Usage +## Usage In the scenario below, a change was made causing a warning in `go vet` on line 5, but `go vet` will show all warnings. Using `revgrep`, you can show only warnings for lines of code that have been changed (in this case, hiding line 6). @@ -42,7 +42,7 @@ from-rev filters issues to lines changed since (and including) this revision Regexp to match path, line number, optional column number, and message ``` -# Other Examples +## Other Examples Issues between branches: ```bash diff --git a/vendor/github.com/golangci/revgrep/issue.go b/vendor/github.com/golangci/revgrep/issue.go new file mode 100644 index 000000000..694d41639 --- /dev/null +++ b/vendor/github.com/golangci/revgrep/issue.go @@ -0,0 +1,37 @@ +package revgrep + +// Issue contains metadata about an issue found. +type Issue struct { + // File is the name of the file as it appeared from the patch. + File string + // LineNo is the line number of the file. + LineNo int + // ColNo is the column number or 0 if none could be parsed. + ColNo int + // HunkPos is position from file's first @@, for new files this will be the line number. + // See also: https://developer.github.com/v3/pulls/comments/#create-a-comment + HunkPos int + // Issue text as it appeared from the tool. + Issue string + // Message is the issue without file name, line number and column number. + Message string +} + +// InputIssue represents issue found by some linter. +type InputIssue interface { + FilePath() string + Line() int +} + +type simpleInputIssue struct { + filePath string + lineNumber int +} + +func (i simpleInputIssue) FilePath() string { + return i.filePath +} + +func (i simpleInputIssue) Line() int { + return i.lineNumber +} diff --git a/vendor/github.com/golangci/revgrep/patch.go b/vendor/github.com/golangci/revgrep/patch.go new file mode 100644 index 000000000..81a2acd7e --- /dev/null +++ b/vendor/github.com/golangci/revgrep/patch.go @@ -0,0 +1,195 @@ +package revgrep + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os/exec" + "regexp" + "strconv" + "strings" +) + +type patchOption struct { + revisionFrom string + revisionTo string + mergeBase string +} + +// GitPatch returns a patch from a git repository. +// If no git repository was found and no errors occurred, nil is returned, +// else an error is returned revisionFrom and revisionTo defines the git diff parameters, +// if left blank and there are unstaged changes or untracked files, +// only those will be returned else only check changes since HEAD~. +// If revisionFrom is set but revisionTo is not, +// untracked files will be included, to exclude untracked files set revisionTo to HEAD~. +// It's incorrect to specify revisionTo without a revisionFrom. 
+func GitPatch(ctx context.Context, option patchOption) (io.Reader, []string, error) { + // check if git repo exists + if err := exec.CommandContext(ctx, "git", "status", "--porcelain").Run(); err != nil { + // don't return an error, we assume the error is not repo exists + return nil, nil, nil + } + + // make a patch for untracked files + ls, err := exec.CommandContext(ctx, "git", "ls-files", "--others", "--exclude-standard").CombinedOutput() + if err != nil { + return nil, nil, fmt.Errorf("error executing git ls-files: %w", err) + } + + var newFiles []string + for _, file := range bytes.Split(ls, []byte{'\n'}) { + if len(file) == 0 || bytes.HasSuffix(file, []byte{'/'}) { + // ls-files was sometimes showing directories when they were ignored + // I couldn't create a test case for this as I couldn't reproduce correctly for the moment, + // just exclude files with trailing / + continue + } + + newFiles = append(newFiles, string(file)) + } + + if option.mergeBase != "" { + var base string + base, err = getMergeBase(ctx, option.mergeBase) + if err != nil { + return nil, nil, err + } + + if base != "" { + option.revisionFrom = base + } + } + + if option.revisionFrom != "" { + args := []string{option.revisionFrom} + + if option.revisionTo != "" { + args = append(args, option.revisionTo) + } + + args = append(args, "--") + + patch, errDiff := gitDiff(ctx, args...) + if errDiff != nil { + return nil, nil, errDiff + } + + if option.revisionTo == "" { + return patch, newFiles, nil + } + + return patch, nil, nil + } + + // make a patch for unstaged changes + patch, err := gitDiff(ctx, "--") + if err != nil { + return nil, nil, err + } + + unstaged := patch.Len() > 0 + + // If there's unstaged changes OR untracked changes (or both), + // then this is a suitable patch + if unstaged || newFiles != nil { + return patch, newFiles, nil + } + + // check for changes in recent commit + patch, err = gitDiff(ctx, "HEAD~", "--") + if err != nil { + return nil, nil, err + } + + return patch, nil, nil +} + +func gitDiff(ctx context.Context, extraArgs ...string) (*bytes.Buffer, error) { + cmd := exec.CommandContext(ctx, "git", "diff", "--color=never", "--no-ext-diff") + + if isSupportedByGit(ctx, 2, 41, 0) { + cmd.Args = append(cmd.Args, "--default-prefix") + } + + cmd.Args = append(cmd.Args, "--relative") + cmd.Args = append(cmd.Args, extraArgs...) 
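The untracked-file handling in GitPatch above boils down to: run `git ls-files --others --exclude-standard`, split on newlines, and drop empty entries plus the occasional directory entry reported with a trailing slash. A standalone sketch of just that step (not the vendored code itself):

```go
// Sketch: collect untracked files the same way GitPatch does.
package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
)

func untrackedFiles(ctx context.Context) ([]string, error) {
	out, err := exec.CommandContext(ctx, "git", "ls-files", "--others", "--exclude-standard").CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("git ls-files: %w", err)
	}

	var files []string
	for _, line := range bytes.Split(out, []byte{'\n'}) {
		// Skip blank lines and directory entries ending in "/".
		if len(line) == 0 || bytes.HasSuffix(line, []byte{'/'}) {
			continue
		}
		files = append(files, string(line))
	}
	return files, nil
}

func main() {
	files, err := untrackedFiles(context.Background())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, f := range files {
		fmt.Println("untracked:", f)
	}
}
```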
+ + patch := new(bytes.Buffer) + errBuff := new(bytes.Buffer) + + cmd.Stdout = patch + cmd.Stderr = errBuff + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("error executing %q: %w: %w", strings.Join(cmd.Args, " "), err, readAsError(errBuff)) + } + + return patch, nil +} + +func readAsError(buff io.Reader) error { + output, err := io.ReadAll(buff) + if err != nil { + return fmt.Errorf("read stderr: %w", err) + } + + return errors.New(string(output)) +} + +func isSupportedByGit(ctx context.Context, major, minor, patch int) bool { + output, err := exec.CommandContext(ctx, "git", "version").CombinedOutput() + if err != nil { + return false + } + + parts := bytes.Split(bytes.TrimSpace(output), []byte(" ")) + if len(parts) < 3 { + return false + } + + v := string(parts[2]) + if v == "" { + return false + } + + vp := regexp.MustCompile(`^(\d+)\.(\d+)(?:\.(\d+))?.*$`).FindStringSubmatch(v) + if len(vp) < 4 { + return false + } + + currentMajor, err := strconv.Atoi(vp[1]) + if err != nil { + return false + } + + currentMinor, err := strconv.Atoi(vp[2]) + if err != nil { + return false + } + + currentPatch, err := strconv.Atoi(vp[3]) + if err != nil { + return false + } + + return currentMajor*1_000_000_000+currentMinor*1_000_000+currentPatch*1_000 >= major*1_000_000_000+minor*1_000_000+patch*1_000 +} + +func getMergeBase(ctx context.Context, base string) (string, error) { + cmd := exec.CommandContext(ctx, "git", "merge-base", base, "HEAD") + + patch := new(bytes.Buffer) + errBuff := new(bytes.Buffer) + + cmd.Stdout = patch + cmd.Stderr = errBuff + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("error executing %q: %w: %w", strings.Join(cmd.Args, " "), err, readAsError(errBuff)) + } + + return strings.TrimSpace(patch.String()), nil +} diff --git a/vendor/github.com/golangci/revgrep/revgrep.go b/vendor/github.com/golangci/revgrep/revgrep.go index 1ef81b203..ca4ac791c 100644 --- a/vendor/github.com/golangci/revgrep/revgrep.go +++ b/vendor/github.com/golangci/revgrep/revgrep.go @@ -3,12 +3,11 @@ package revgrep import ( "bufio" - "bytes" + "context" "errors" "fmt" "io" "os" - "os/exec" "path/filepath" "regexp" "strconv" @@ -30,96 +29,71 @@ type Checker struct { Debug io.Writer // RevisionFrom check revision starting at, leave blank for auto-detection ignored if patch is set. RevisionFrom string - // WholeFiles indicates that the user wishes to see all issues that comes up anywhere in any file that has been changed in this revision or patch. - WholeFiles bool // RevisionTo checks revision finishing at, leave blank for auto-detection ignored if patch is set. RevisionTo string + // MergeBase checks revision starting at the best common ancestor, leave blank for auto-detection ignored if patch is set. + MergeBase string + // WholeFiles indicates that the user wishes to see all issues that comes up anywhere in any file that has been changed in this revision or patch. + WholeFiles bool // Regexp to match path, line number, optional column number, and message. Regexp string // AbsPath is used to make an absolute path of an issue's filename to be relative in order to match patch file. // If not set, current working directory is used. AbsPath string - // Calculated changes for next calls to IsNewIssue + // Calculated changes for next calls to [Checker.IsNewIssue]/[Checker.IsNew]. changes map[string][]pos } -// Issue contains metadata about an issue found. -type Issue struct { - // File is the name of the file as it appeared from the patch. 
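The version gate in isSupportedByGit above (used to decide whether `--default-prefix` can be passed to `git diff`) parses the `git version` output and compares (major, minor, patch) via a weighted sum. The sketch below reproduces that comparison on its own; the regexp and weights mirror the vendored code, while the helper name is hypothetical.

```go
// Sketch: "is this version at least X.Y.Z?" via a weighted sum, as in
// isSupportedByGit.
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var versionRe = regexp.MustCompile(`^(\d+)\.(\d+)(?:\.(\d+))?.*$`)

// atLeast reports whether version v (e.g. "2.41.0") is >= major.minor.patch.
func atLeast(v string, major, minor, patch int) bool {
	m := versionRe.FindStringSubmatch(v)
	if len(m) < 4 {
		return false
	}

	cur := 0
	for i, weight := range []int{1_000_000_000, 1_000_000, 1_000} {
		n, err := strconv.Atoi(m[i+1])
		if err != nil {
			return false // e.g. the patch component is missing
		}
		cur += n * weight
	}

	return cur >= major*1_000_000_000+minor*1_000_000+patch*1_000
}

func main() {
	fmt.Println(atLeast("2.41.0", 2, 41, 0)) // true: --default-prefix is available
	fmt.Println(atLeast("2.39.2", 2, 41, 0)) // false
}
```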
- File string - // LineNo is the line number of the file. - LineNo int - // ColNo is the column number or 0 if none could be parsed. - ColNo int - // HunkPos is position from file's first @@, for new files this will be the line number. - // See also: https://developer.github.com/v3/pulls/comments/#create-a-comment - HunkPos int - // Issue text as it appeared from the tool. - Issue string - // Message is the issue without file name, line number and column number. - Message string -} - -// InputIssue represents issue found by some linter. -type InputIssue interface { - FilePath() string - Line() int -} - -type simpleInputIssue struct { - filePath string - lineNumber int -} - -type pos struct { - lineNo int // line number - hunkPos int // position relative to first @@ in file -} - -func (i simpleInputIssue) FilePath() string { - return i.filePath -} - -func (i simpleInputIssue) Line() int { - return i.lineNumber -} - // Prepare extracts a patch and changed lines. -func (c *Checker) Prepare() error { - returnErr := c.preparePatch() +// +// WARNING: it should only be used before an explicit call to [Checker.IsNewIssue]/[Checker.IsNew]. +// +// WARNING: only [Checker.Patch], [Checker.RevisionFrom], [Checker.RevisionTo], [Checker.WholeFiles] options are used, +// the other options ([Checker.Regexp], [Checker.AbsPath]) are only used by [Checker.Check]. +func (c *Checker) Prepare(ctx context.Context) error { + err := c.loadPatch(ctx) + c.changes = c.linesChanged() - return returnErr + + return err } -// IsNewIssue checks whether issue found by linter is new: it was found in changed lines. -func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { - fchanges, ok := c.changes[filepath.ToSlash(i.FilePath())] - if !ok { // file wasn't changed +// IsNew checks whether issue found by linter is new: it was found in changed lines. +// +// WARNING: it requires to call [Checker.Prepare] before call this method to load the changes from patch. +func (c *Checker) IsNew(filePath string, line int) (hunkPos int, isNew bool) { + changes, ok := c.changes[filepath.ToSlash(filePath)] + if !ok { + // file wasn't changed return 0, false } if c.WholeFiles { - return i.Line(), true + return line, true } var ( fpos pos changed bool ) + // found file, see if lines matched - for _, pos := range fchanges { - if pos.lineNo == i.Line() { + for _, pos := range changes { + if pos.lineNo == line { fpos = pos changed = true + break } } - if changed || fchanges == nil { + if changed || changes == nil { // either file changed or it's a new file hunkPos := fpos.lineNo - if changed { // existing file changed + + // existing file changed + if changed { hunkPos = fpos.hunkPos } @@ -129,7 +103,14 @@ func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { return 0, false } -// Check scans reader and writes any lines to writer that have been added in Checker.Patch. +// IsNewIssue checks whether issue found by linter is new: it was found in changed lines. +// +// WARNING: it requires to call [Checker.Prepare] before call this method to load the changes from patch. +func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { + return c.IsNew(i.FilePath(), i.Line()) +} + +// Check scans reader and writes any lines to writer that have been added in [Checker.Patch]. // // Returns the issues written to writer when no error occurs. // @@ -137,9 +118,10 @@ func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { // all issues are written to writer and an error is returned. 
// // File paths in reader must be relative to current working directory or absolute. -func (c *Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err error) { - returnErr := c.Prepare() - writeAll := returnErr != nil +func (c *Checker) Check(ctx context.Context, reader io.Reader, writer io.Writer) (issues []Issue, err error) { + errPrepare := c.Prepare(ctx) + + writeAll := errPrepare != nil // file.go:lineNo:colNo:message // colNo is optional, strip spaces before message @@ -159,7 +141,7 @@ func (c *Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err if absPath == "" { absPath, err = os.Getwd() if err != nil { - returnErr = fmt.Errorf("could not get current working directory: %w", err) + errPrepare = fmt.Errorf("could not get current working directory: %w", err) } } @@ -227,30 +209,41 @@ func (c *Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err } if err := scanner.Err(); err != nil { - returnErr = fmt.Errorf("error reading standard input: %w", err) + errPrepare = fmt.Errorf("error reading standard input: %w", err) } - return issues, returnErr + return issues, errPrepare } -func (c *Checker) debugf(format string, s ...interface{}) { - if c.Debug != nil { - _, _ = fmt.Fprint(c.Debug, "DEBUG: ") - _, _ = fmt.Fprintf(c.Debug, format+"\n", s...) +func (c *Checker) debugf(format string, s ...any) { + if c.Debug == nil { + return } + + _, _ = fmt.Fprint(c.Debug, "DEBUG: ") + _, _ = fmt.Fprintf(c.Debug, format+"\n", s...) } -func (c *Checker) preparePatch() error { - // Check if patch is supplied, if not, retrieve from VCS +// loadPatch checks if patch is supplied, if not, retrieve from VCS. +func (c *Checker) loadPatch(ctx context.Context) error { + if c.Patch != nil { + return nil + } + + option := patchOption{ + revisionFrom: c.RevisionFrom, + revisionTo: c.RevisionTo, + mergeBase: c.MergeBase, + } + + var err error + c.Patch, c.NewFiles, err = GitPatch(ctx, option) + if err != nil { + return fmt.Errorf("could not read git repo: %w", err) + } + if c.Patch == nil { - var err error - c.Patch, c.NewFiles, err = GitPatch(c.RevisionFrom, c.RevisionTo) - if err != nil { - return fmt.Errorf("could not read git repo: %w", err) - } - if c.Patch == nil { - return errors.New("no version control repository found") - } + return errors.New("no version control repository found") } return nil @@ -287,15 +280,19 @@ func (c *Checker) linesChanged() map[string][]pos { // it's likey part of a file and not relevant to the patch. 
continue } + if err != nil { scanErr = err break } + line := strings.TrimRight(string(lineB), "\n") c.debugf(line) + s.lineNo++ s.hunkPos++ + switch { case strings.HasPrefix(line, "+++ ") && len(line) > 4: if s.changes != nil { @@ -304,6 +301,7 @@ func (c *Checker) linesChanged() map[string][]pos { } // 6 removes "+++ b/" s = state{file: line[6:], hunkPos: -1, changes: []pos{}} + case strings.HasPrefix(line, "@@ "): // @@ -1 +2,4 @@ // chdr ^^^^^^^^^^^^^ @@ -311,14 +309,18 @@ func (c *Checker) linesChanged() map[string][]pos { // cstart ^ chdr := strings.Split(line, " ") ahdr := strings.Split(chdr[2], ",") + // [1:] to remove leading plus cstart, err := strconv.ParseUint(ahdr[0][1:], 10, 64) if err != nil { panic(err) } + s.lineNo = int(cstart) - 1 // -1 as cstart is the next line number + case strings.HasPrefix(line, "-"): s.lineNo-- + case strings.HasPrefix(line, "+"): s.changes = append(s.changes, pos{lineNo: s.lineNo, hunkPos: s.hunkPos}) } @@ -334,150 +336,9 @@ func (c *Checker) linesChanged() map[string][]pos { return changes } -// GitPatch returns a patch from a git repository. -// If no git repository was found and no errors occurred, nil is returned, -// else an error is returned revisionFrom and revisionTo defines the git diff parameters, -// if left blank and there are unstaged changes or untracked files, -// only those will be returned else only check changes since HEAD~. -// If revisionFrom is set but revisionTo is not, -// untracked files will be included, to exclude untracked files set revisionTo to HEAD~. -// It's incorrect to specify revisionTo without a revisionFrom. -func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { - // check if git repo exists - if err := exec.Command("git", "status", "--porcelain").Run(); err != nil { - // don't return an error, we assume the error is not repo exists - return nil, nil, nil - } - - // make a patch for untracked files - ls, err := exec.Command("git", "ls-files", "--others", "--exclude-standard").CombinedOutput() - if err != nil { - return nil, nil, fmt.Errorf("error executing git ls-files: %w", err) - } - - var newFiles []string - for _, file := range bytes.Split(ls, []byte{'\n'}) { - if len(file) == 0 || bytes.HasSuffix(file, []byte{'/'}) { - // ls-files was sometimes showing directories when they were ignored - // I couldn't create a test case for this as I couldn't reproduce correctly for the moment, - // just exclude files with trailing / - continue - } - - newFiles = append(newFiles, string(file)) - } - - if revisionFrom != "" { - args := []string{revisionFrom} - - if revisionTo != "" { - args = append(args, revisionTo) - } - - args = append(args, "--") - - patch, errDiff := gitDiff(args...) 
- if errDiff != nil { - return nil, nil, errDiff - } - - if revisionTo == "" { - return patch, newFiles, nil - } - - return patch, nil, nil - } - - // make a patch for unstaged changes - patch, err := gitDiff("--") - if err != nil { - return nil, nil, err - } - - unstaged := patch.Len() > 0 - - // If there's unstaged changes OR untracked changes (or both), - // then this is a suitable patch - if unstaged || newFiles != nil { - return patch, newFiles, nil - } - - // check for changes in recent commit - patch, err = gitDiff("HEAD~", "--") - if err != nil { - return nil, nil, err - } - - return patch, nil, nil -} - -func gitDiff(extraArgs ...string) (*bytes.Buffer, error) { - cmd := exec.Command("git", "diff", "--color=never", "--no-ext-diff") - - if isSupportedByGit(2, 41, 0) { - cmd.Args = append(cmd.Args, "--default-prefix") - } - - cmd.Args = append(cmd.Args, "--relative") - cmd.Args = append(cmd.Args, extraArgs...) - - patch := new(bytes.Buffer) - errBuff := new(bytes.Buffer) - - cmd.Stdout = patch - cmd.Stderr = errBuff - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("error executing %q: %w: %w", strings.Join(cmd.Args, " "), err, readAsError(errBuff)) - } - - return patch, nil -} - -func readAsError(buff io.Reader) error { - output, err := io.ReadAll(buff) - if err != nil { - return fmt.Errorf("read stderr: %w", err) - } - - return errors.New(string(output)) -} - -func isSupportedByGit(major, minor, patch int) bool { - output, err := exec.Command("git", "version").CombinedOutput() - if err != nil { - return false - } - - parts := bytes.Split(bytes.TrimSpace(output), []byte(" ")) - if len(parts) < 3 { - return false - } - - v := string(parts[2]) - if v == "" { - return false - } - - vp := regexp.MustCompile(`^(\d+)\.(\d+)(?:\.(\d+))?.*$`).FindStringSubmatch(v) - if len(vp) < 4 { - return false - } - - currentMajor, err := strconv.Atoi(vp[1]) - if err != nil { - return false - } - - currentMinor, err := strconv.Atoi(vp[2]) - if err != nil { - return false - } - - currentPatch, err := strconv.Atoi(vp[3]) - if err != nil { - return false - } - - return currentMajor*1_000_000_000+currentMinor*1_000_000+currentPatch*1_000 >= major*1_000_000_000+minor*1_000_000+patch*1_000 +type pos struct { + // Line number. + lineNo int + // Position relative to first @@ in file. + hunkPos int } diff --git a/vendor/github.com/google/gnostic-models/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go index 1bfe96121..26b31e51e 100644 --- a/vendor/github.com/google/gnostic-models/compiler/context.go +++ b/vendor/github.com/google/gnostic-models/compiler/context.go @@ -15,7 +15,7 @@ package compiler import ( - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // Context contains state of the compiler as it traverses a document. 
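For context, a minimal sketch of how the context-aware revgrep API introduced above could be driven. This is not part of the patch: the import path assumes the vendored package is used as github.com/golangci/revgrep, and the revision name and the file/line queried are illustrative assumptions only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/golangci/revgrep"
)

func main() {
	ctx := context.Background()

	checker := revgrep.Checker{
		RevisionFrom: "origin/main", // assumed revision for this sketch
		WholeFiles:   false,
	}

	// Prepare loads the git patch (or Checker.Patch, if one was supplied)
	// and indexes the changed lines for later IsNew/IsNewIssue calls.
	if err := checker.Prepare(ctx); err != nil {
		log.Fatalf("prepare: %v", err)
	}

	// Ask whether a hypothetical issue reported at main.go:42 lands on a
	// line that the diff actually changed.
	hunkPos, isNew := checker.IsNew("main.go", 42)
	fmt.Printf("new=%v hunkPos=%d\n", isNew, hunkPos)
}

Compared with the previous vendored version, the caller now passes a context.Context to Prepare and Check, and IsNew accepts a plain file path and line number, with IsNewIssue kept as a thin wrapper over it.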
diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 16ae66faa..efa07f2a9 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,9 +20,9 @@ import ( "os/exec" "strings" + yaml "go.yaml.in/yaml/v3" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" ) diff --git a/vendor/github.com/google/gnostic-models/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go index 975d65e8f..a83261eb6 100644 --- a/vendor/github.com/google/gnostic-models/compiler/helpers.go +++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -20,7 +20,7 @@ import ( "sort" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/jsonschema" ) diff --git a/vendor/github.com/google/gnostic-models/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go index be0e8b40c..da409d6b3 100644 --- a/vendor/github.com/google/gnostic-models/compiler/reader.go +++ b/vendor/github.com/google/gnostic-models/compiler/reader.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) var verboseReader = false diff --git a/vendor/github.com/google/gnostic-models/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go index 4781bdc5f..a42b8e003 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/models.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/models.go @@ -16,7 +16,7 @@ // of JSON Schemas. package jsonschema -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // The Schema struct models a JSON Schema and, because schemas are // defined hierarchically, contains many references to itself. diff --git a/vendor/github.com/google/gnostic-models/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go index b8583d466..4f1fe0c08 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/reader.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/reader.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // This is a global map of all known Schemas. 
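The gnostic-models hunks above and below only swap the YAML module path from gopkg.in/yaml.v3 to go.yaml.in/yaml/v3. As a rough sketch, and assuming the new module path is a drop-in replacement exposing the same yaml.Node and yaml.Unmarshal API (which is what the mechanical import rewrite in this patch relies on), existing callers keep working unchanged apart from the import line:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3" // previously: yaml "gopkg.in/yaml.v3"
)

func main() {
	var node yaml.Node
	if err := yaml.Unmarshal([]byte("title: example\n"), &node); err != nil {
		panic(err)
	}

	// DocumentNode -> MappingNode -> key/value scalars, the same node shape
	// the gnostic-models compiler walks with the old import path.
	doc := node.Content[0]
	fmt.Println(doc.Content[0].Value, "=", doc.Content[1].Value)
}

Since only the import path changes, no behavioral difference is expected in the vendored gnostic-models code.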
diff --git a/vendor/github.com/google/gnostic-models/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go index 340dc5f93..19f5ddeae 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/writer.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/writer.go @@ -17,7 +17,7 @@ package jsonschema import ( "fmt" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) const indentation = " " diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index d71fe6d54..de337d80c 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid NonBodyParameter") + message := "contains an invalid NonBodyParameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid Parameter") + message := "contains an invalid Parameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParametersItem") + message := "contains an invalid ParametersItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseValue") + message := "contains an invalid ResponseValue" err := compiler.NewError(context, message) errors = []error{err} } @@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaItem") + message := "contains an invalid SchemaItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid 
SecurityDefinitionsItem") + message := "contains an invalid SecurityDefinitionsItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if 
m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } - if m.Deprecated != false { + if m.Deprecated { info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) } @@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -8340,7 +8340,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, 
compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) } - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) } - if m.Attribute != false { + if m.Attribute { info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) 
} - if m.Wrapped != false { + if m.Wrapped { info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) } diff --git a/vendor/github.com/google/gnostic-models/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go index e96ac0d6d..89469a13e 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -15,7 +15,7 @@ package openapi_v2 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index 4b1131ce1..662772dd9 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AnyOrExpression") + message := "contains an invalid AnyOrExpression" err := compiler.NewError(context, message) errors = []error{err} } @@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid CallbackOrReference") + message := "contains an invalid CallbackOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ExampleOrReference") + message := "contains an invalid ExampleOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid HeaderOrReference") + message := "contains an invalid HeaderOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid LinkOrReference") + message := "contains an invalid LinkOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3090,7 
+3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParameterOrReference") + message := "contains an invalid ParameterOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid RequestBodyOrReference") + message := "contains an invalid RequestBodyOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseOrReference") + message := "contains an invalid ResponseOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaOrReference") + message := "contains an invalid SchemaOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") + message := "contains an invalid SecuritySchemeOrReference" err := compiler.NewError(context, message) errors = []error{err} } diff --git a/vendor/github.com/google/gnostic-models/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go index 1cee46773..499ff883c 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -15,7 +15,7 @@ package openapi_v3 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/vendor/github.com/gostaticanalysis/comment/.tagpr b/vendor/github.com/gostaticanalysis/comment/.tagpr new file mode 100644 index 000000000..59bf98541 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/.tagpr @@ -0,0 +1,35 @@ +# config file for the tagpr in git config format +# The tagpr generates the initial configuration, which you can rewrite to suit your environment. +# CONFIGURATIONS: +# tagpr.releaseBranch +# Generally, it is "main." It is the branch for releases. The pcpr tracks this branch, +# creates or updates a pull request as a release candidate, or tags when they are merged. +# +# tagpr.versionFile +# Versioning file containing the semantic version needed to be updated at release. +# It will be synchronized with the "git tag". +# Often this is a meta-information file such as gemspec, setup.cfg, package.json, etc. +# Sometimes the source code file, such as version.go or Bar.pm, is used. +# If you do not want to use versioning files but only git tags, specify the "-" string here. 
+# You can specify multiple version files by comma separated strings. +# +# tagpr.vPrefix +# Flag whether or not v-prefix is added to semver when git tagging. (e.g. v1.2.3 if true) +# This is only a tagging convention, not how it is described in the version file. +# +# tagpr.changelog (Optional) +# Flag whether or not changelog is added or changed during the release. +# +# tagpr.command (Optional) +# Command to change files just before release. +# +# tagpr.tmplate (Optional) +# Pull request template in go template format +# +# tagpr.release (Optional) +# GitHub Release creation behavior after tagging [true, draft, false] +# If this value is not set, the release is to be created. +[tagpr] + vPrefix = true + releaseBranch = main + versionFile = version.txt diff --git a/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md b/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md new file mode 100644 index 000000000..941cc15ff --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md @@ -0,0 +1,34 @@ +# Changelog + +## [v1.5.0](https://github.com/gostaticanalysis/comment/compare/v1.4.2...v1.5.0) - 2024-11-15 +- Add tagpr and testvet by @tenntenn in https://github.com/gostaticanalysis/comment/pull/18 +- Add IgnorePosLine and deprecate IgnoreLine by @neglect-yp in https://github.com/gostaticanalysis/comment/pull/17 +- Fix errors for testvet by @tenntenn in https://github.com/gostaticanalysis/comment/pull/20 +- Add version.txt by @tenntenn in https://github.com/gostaticanalysis/comment/pull/21 +- Update go version and dependencies by @tenntenn in https://github.com/gostaticanalysis/comment/pull/19 + +## [v1.4.2](https://github.com/gostaticanalysis/comment/compare/v1.4.1...v1.4.2) - 2021-03-03 +- passes/commentmap: use txtar for testdata by @zchee in https://github.com/gostaticanalysis/comment/pull/14 +- github/workflows: add test GHA by @zchee in https://github.com/gostaticanalysis/comment/pull/15 +- omment: fix hasIgnoreCheck to more pares lines by @zchee in https://github.com/gostaticanalysis/comment/pull/16 + +## [v1.4.1](https://github.com/gostaticanalysis/comment/compare/v1.4.0...v1.4.1) - 2020-09-10 +- Fix comment directive parsing in Go 1.15+ by @nmiyake in https://github.com/gostaticanalysis/comment/pull/13 +- gofmt files by @nmiyake in https://github.com/gostaticanalysis/comment/pull/12 +- Fix logic error in hasIgnoreCheck by @nmiyake in https://github.com/gostaticanalysis/comment/pull/11 + +## [v1.4.0](https://github.com/gostaticanalysis/comment/compare/v1.3.0...v1.4.0) - 2020-08-20 +- Add CommentsByPosLine by @tenntenn in https://github.com/gostaticanalysis/comment/pull/9 + +## [v1.3.0](https://github.com/gostaticanalysis/comment/compare/v1.2.0...v1.3.0) - 2020-01-30 +- Fix link to ast package by @po3rin in https://github.com/gostaticanalysis/comment/pull/4 +- Add IgnoreLine by @tenntenn in https://github.com/gostaticanalysis/comment/pull/5 + +## [v1.2.0](https://github.com/gostaticanalysis/comment/compare/v1.1.0...v1.2.0) - 2019-03-18 +- Add IgnorePos by @tenntenn in https://github.com/gostaticanalysis/comment/pull/3 + +## [v1.1.0](https://github.com/gostaticanalysis/comment/compare/v1.0.0...v1.1.0) - 2019-03-08 +- Add ignore by @tenntenn in https://github.com/gostaticanalysis/comment/pull/1 +- Fix Ignore and add tests by @tenntenn in https://github.com/gostaticanalysis/comment/pull/2 + +## [v1.0.0](https://github.com/gostaticanalysis/comment/commits/v1.0.0) - 2019-03-08 diff --git a/vendor/github.com/gostaticanalysis/comment/comment.go 
b/vendor/github.com/gostaticanalysis/comment/comment.go index 79cb09382..2e418a466 100644 --- a/vendor/github.com/gostaticanalysis/comment/comment.go +++ b/vendor/github.com/gostaticanalysis/comment/comment.go @@ -52,7 +52,8 @@ func (maps Maps) Annotated(n ast.Node, annotation string) bool { // Ignore checks either specified AST node is ignored by the check. // It follows staticcheck style as the below. -// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) Ignore(n ast.Node, check string) bool { for _, cg := range maps.Comments(n) { if hasIgnoreCheck(cg, check) { @@ -64,7 +65,8 @@ func (maps Maps) Ignore(n ast.Node, check string) bool { // IgnorePos checks either specified postion of AST node is ignored by the check. // It follows staticcheck style as the below. -// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) IgnorePos(pos token.Pos, check string) bool { for _, cg := range maps.CommentsByPos(pos) { if hasIgnoreCheck(cg, check) { @@ -109,9 +111,11 @@ func (maps Maps) CommentsByPosLine(fset *token.FileSet, pos token.Pos) []*ast.Co return nil } +// Deprecated: This function does not work with multiple files. // IgnoreLine checks either specified lineof AST node is ignored by the check. // It follows staticcheck style as the below. -// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool { for _, cg := range maps.CommentsByLine(fset, line) { if hasIgnoreCheck(cg, check) { @@ -121,6 +125,19 @@ func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool { return false } +// IgnorePosLine checks either specified lineof AST node is ignored by the check. +// It follows staticcheck style as the below. +// +// //lint:ignore Check1[,Check2,...,CheckN] reason +func (maps Maps) IgnorePosLine(fset *token.FileSet, pos token.Pos, check string) bool { + for _, cg := range maps.CommentsByPosLine(fset, pos) { + if hasIgnoreCheck(cg, check) { + return true + } + } + return false +} + // hasIgnoreCheck returns true if the provided CommentGroup starts with a comment // of the form "//lint:ignore Check1[,Check2,...,CheckN] reason" and one of the // checks matches the provided check. diff --git a/vendor/github.com/gostaticanalysis/comment/version.txt b/vendor/github.com/gostaticanalysis/comment/version.txt new file mode 100644 index 000000000..2e7bd9108 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/comment/version.txt @@ -0,0 +1 @@ +v1.5.0 diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/.tagpr b/vendor/github.com/gostaticanalysis/forcetypeassert/.tagpr new file mode 100644 index 000000000..59bf98541 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/.tagpr @@ -0,0 +1,35 @@ +# config file for the tagpr in git config format +# The tagpr generates the initial configuration, which you can rewrite to suit your environment. +# CONFIGURATIONS: +# tagpr.releaseBranch +# Generally, it is "main." It is the branch for releases. The pcpr tracks this branch, +# creates or updates a pull request as a release candidate, or tags when they are merged. +# +# tagpr.versionFile +# Versioning file containing the semantic version needed to be updated at release. +# It will be synchronized with the "git tag". +# Often this is a meta-information file such as gemspec, setup.cfg, package.json, etc. 
+# Sometimes the source code file, such as version.go or Bar.pm, is used. +# If you do not want to use versioning files but only git tags, specify the "-" string here. +# You can specify multiple version files by comma separated strings. +# +# tagpr.vPrefix +# Flag whether or not v-prefix is added to semver when git tagging. (e.g. v1.2.3 if true) +# This is only a tagging convention, not how it is described in the version file. +# +# tagpr.changelog (Optional) +# Flag whether or not changelog is added or changed during the release. +# +# tagpr.command (Optional) +# Command to change files just before release. +# +# tagpr.tmplate (Optional) +# Pull request template in go template format +# +# tagpr.release (Optional) +# GitHub Release creation behavior after tagging [true, draft, false] +# If this value is not set, the release is to be created. +[tagpr] + vPrefix = true + releaseBranch = main + versionFile = version.txt diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/CHANGELOG.md b/vendor/github.com/gostaticanalysis/forcetypeassert/CHANGELOG.md new file mode 100644 index 000000000..7575fec62 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/CHANGELOG.md @@ -0,0 +1,19 @@ +# Changelog + +## [v0.2.0](https://github.com/gostaticanalysis/forcetypeassert/compare/v0.1.0...v0.2.0) - 2025-02-13 +- Update x/tools to fix panic in tests by @alexandear in https://github.com/gostaticanalysis/forcetypeassert/pull/19 +- go.mod: bump golang.org/x/tools dependency by @egonelbre in https://github.com/gostaticanalysis/forcetypeassert/pull/20 +- Add tagpr and version up Go and dependencies by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/21 +- Support any by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/23 +- Fix for #18 by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/24 + +## [v0.1.0](https://github.com/gostaticanalysis/forcetypeassert/commits/v0.1.0) - 2021-09-08 +- update check pattern by @knsh14 in https://github.com/gostaticanalysis/forcetypeassert/pull/1 +- Fix typo by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/3 +- Add reviewdog setting by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/4 +- Add an explanation on how to fix the linter errors by @ozon2 in https://github.com/gostaticanalysis/forcetypeassert/pull/9 +- Delete reviewdog.yml by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/11 +- Create testandvet.yml by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/10 +- Fix bug for valuespec by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/12 +- Fix bugs for expressions by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/13 +- Add result by @tenntenn in https://github.com/gostaticanalysis/forcetypeassert/pull/14 diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go b/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go index bb48485d9..e1b21825b 100644 --- a/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/forcetypeassert.go @@ -2,6 +2,7 @@ package forcetypeassert import ( "go/ast" + "go/types" "reflect" "golang.org/x/tools/go/analysis" @@ -42,7 +43,9 @@ func (p *Panicable) At(i int) ast.Node { const Doc = "forcetypeassert is finds type assertions which did forcely" -func run(pass *analysis.Pass) (interface{}, error) { +var anyTyp = 
types.Universe.Lookup("any").Type() + +func run(pass *analysis.Pass) (any, error) { inspect, _ := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) result := &Panicable{m: make(map[ast.Node]bool)} @@ -62,7 +65,7 @@ func run(pass *analysis.Pass) (interface{}, error) { case *ast.ValueSpec: return checkValueSpec(pass, result, n) case *ast.TypeAssertExpr: - if n.Type != nil { + if n.Type != nil && !isAny(pass, n.Type) { result.m[n] = true result.nodes = append(result.nodes, n) pass.Reportf(n.Pos(), "type assertion must be checked") @@ -76,6 +79,10 @@ func run(pass *analysis.Pass) (interface{}, error) { return result, nil } +func isAny(pass *analysis.Pass, expr ast.Expr) bool { + return types.Identical(pass.TypesInfo.TypeOf(expr), anyTyp) +} + func checkAssignStmt(pass *analysis.Pass, result *Panicable, n *ast.AssignStmt) bool { tae := findTypeAssertion(n.Rhs) if tae == nil { @@ -83,11 +90,16 @@ func checkAssignStmt(pass *analysis.Pass, result *Panicable, n *ast.AssignStmt) } switch { + + // if right hand is a call expression, assign statement can't assert boolean value which describes type assertion is succeeded + case len(n.Rhs) == 1 && isCallExpr(n.Rhs[0]): + pass.Reportf(n.Pos(), "right hand must be only type assertion") + return false // if right hand has 2 or more values, assign statement can't assert boolean value which describes type assertion is succeeded case len(n.Rhs) > 1: pass.Reportf(n.Pos(), "right hand must be only type assertion") return false - case len(n.Lhs) != 2 && tae.Type != nil: + case len(n.Lhs) != 2 && tae.Type != nil && !isAny(pass, tae.Type): result.m[n] = true result.nodes = append(result.nodes, n) pass.Reportf(n.Pos(), "type assertion must be checked") @@ -106,11 +118,15 @@ func checkValueSpec(pass *analysis.Pass, result *Panicable, n *ast.ValueSpec) bo } switch { + // if right hand is a call expression, assign statement can't assert boolean value which describes type assertion is succeeded + case len(n.Values) == 1 && isCallExpr(n.Values[0]): + pass.Reportf(n.Pos(), "right hand must be only type assertion") + return false // if right hand has 2 or more values, assign statement can't assert boolean value which describes type assertion is succeeded case len(n.Values) > 1: pass.Reportf(n.Pos(), "right hand must be only type assertion") return false - case len(n.Names) != 2 && tae.Type != nil: + case len(n.Names) != 2 && tae.Type != nil && !isAny(pass, tae.Type): result.m[n] = true result.nodes = append(result.nodes, n) pass.Reportf(n.Pos(), "type assertion must be checked") @@ -141,3 +157,8 @@ func findTypeAssertion(exprs []ast.Expr) *ast.TypeAssertExpr { } return nil } + +func isCallExpr(expr ast.Expr) bool { + _, isCallExpr := expr.(*ast.CallExpr) + return isCallExpr +} diff --git a/vendor/github.com/gostaticanalysis/forcetypeassert/version.txt b/vendor/github.com/gostaticanalysis/forcetypeassert/version.txt new file mode 100644 index 000000000..1474d00f0 --- /dev/null +++ b/vendor/github.com/gostaticanalysis/forcetypeassert/version.txt @@ -0,0 +1 @@ +v0.2.0 diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c 
+_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md new file mode 100644 index 000000000..556f1a67b --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md @@ -0,0 +1,27 @@ +# UNRELEASED + +# 2.0.0 (December 15th, 2022) + +* Update API to use generics [[GH-43](https://github.com/hashicorp/go-immutable-radix/pull/43)) + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] + +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE similarity index 50% rename from vendor/github.com/hashicorp/hcl/LICENSE rename to vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE index c33dcc7c9..f4f97ee58 100644 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE @@ -1,90 +1,92 @@ +Copyright (c) 2015 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. "Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. 
"Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. “Patent Claims” of a Contributor +1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -1.12. “Secondary License” +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -100,57 +102,59 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. 
Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -163,11 +167,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. 
You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -179,39 +184,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. 
However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -220,14 +226,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. Termination @@ -235,21 +241,22 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -258,16 +265,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. Limitation of Liability @@ -279,27 +286,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. 
Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -313,23 +322,24 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. 
Exhibit A - Source Code Form License Notice @@ -340,15 +350,16 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md b/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md new file mode 100644 index 000000000..e17ccf4d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md @@ -0,0 +1,73 @@ +go-immutable-radix [![Run CI Tests](https://github.com/hashicorp/go-immutable-radix/actions/workflows/ci.yaml/badge.svg)](https://github.com/hashicorp/go-immutable-radix/actions/workflows/ci.yaml) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +V2 +== + +The v2 of go-immutable-radix introduces generics to improve compile-time type +safety for users of the package. The module name for v2 is +`github.com/hashicorp/go-immutable-radix/v2`. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New[int]() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+ +```go +// Create a tree +r := iradix.New[int]() +r, _, _ = r.Insert([]byte("001"), 1) +r, _, _ = r.Insert([]byte("002"), 2) +r, _, _ = r.Insert([]byte("005"), 5) +r, _, _ = r.Insert([]byte("010"), 10) +r, _, _ = r.Insert([]byte("100"), 10) + +// Range scan over the keys that sort lexicographically between [003, 050) +it := r.Root().Iterator() +it.SeekLowerBound([]byte("003")) +for key, _, ok := it.Next(); ok; key, _, ok = it.Next() { + if string(key) >= "050" { + break + } + fmt.Println(string(key)) +} +// Output: +// 005 +// 010 +``` + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go new file mode 100644 index 000000000..2e452f3e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges[T any] []edge[T] + +func (e edges[T]) Len() int { + return len(e) +} + +func (e edges[T]) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges[T]) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges[T]) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go new file mode 100644 index 000000000..8774020bc --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go @@ -0,0 +1,679 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree[T any] struct { + root *Node[T] + size int +} + +// New returns an empty Tree +func New[T any]() *Tree[T] { + t := &Tree[T]{ + root: &Node[T]{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree[T]) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn[T any] struct { + // root is the modified root for the transaction. + root *Node[T] + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node[T] + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. 
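+	// Tracking writable nodes here lets writeNode hand back an
+	// already-copied node unchanged when the same node is written again
+	// within a single transaction.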
+ writable *simplelru.LRU[*Node[T], any] + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree[T]) Txn() *Txn[T] { + txn := &Txn[T]{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// Clone makes an independent copy of the transaction. The new transaction +// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently however each transaction may only be mutated in a single thread. +func (t *Txn[T]) Clone() *Txn[T] { + // reset the writable node cache to avoid leaking future writes into the clone + t.writable = nil + + txn := &Txn[T]{ + root: t.root, + snap: t.snap, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn[T]) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn[T]) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn[T]) writeNode(n *Node[T], forLeafUpdate bool) *Node[T] { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU[*Node[T], any](defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. 
We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node[T]{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge[T], len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn[T]) trackChannelsAndCount(n *Node[T]) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn[T]) mergeChild(n *Node[T]) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
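+	// The child's prefix is appended to this node's prefix, and the child's
+	// leaf and edges are hoisted up, collapsing the single-edge chain into
+	// one node.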
+ n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge[T], len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn[T]) insert(n *Node[T], k, search []byte, v T) (*Node[T], T, bool) { + var zero T + + // Handle key exhaustion + if len(search) == 0 { + var oldVal T + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge[T]{ + label: search[0], + node: &Node[T]{ + mutateCh: make(chan struct{}), + leaf: &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, zero, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node[T]{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge[T]{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge[T]{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, zero, false + } + + // Create a new edge for the node + splitNode.addEdge(edge[T]{ + label: search[0], + node: &Node[T]{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, zero, false +} + +// delete does a recursive deletion +func (t *Txn[T]) delete(n *Node[T], search []byte) (*Node[T], *leafNode[T]) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. 
This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn[T]) deletePrefix(n *Node[T], search []byte) (*Node[T], int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn[T]) Insert(k []byte, v T) (T, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn[T]) Delete(k []byte) (T, bool) { + var zero T + newRoot, leaf := t.delete(t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return zero, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn[T]) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. 
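+//
+// Reads through Root observe the transaction's uncommitted writes. A minimal
+// sketch, assuming a Tree[int] named tree (hypothetical values):
+//
+//	txn := tree.Txn()
+//	txn.Insert([]byte("foo"), 1)
+//	v, ok := txn.Root().Get([]byte("foo")) // v == 1, ok == true, before Commit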
+func (t *Txn[T]) Root() *Node[T] { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn[T]) Get(k []byte) (T, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn[T]) GetWatch(k []byte) (<-chan struct{}, T, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn[T]) Commit() *Tree[T] { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn[T]) CommitOnly() *Tree[T] { + nt := &Tree[T]{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn[T]) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn[T]) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
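+//
+// Because trees are immutable, the receiver is left untouched and only the
+// returned tree carries the update; a rough sketch:
+//
+//	t0 := New[int]()
+//	t1, _, _ := t0.Insert([]byte("k"), 1)
+//	_, ok0 := t0.Get([]byte("k")) // ok0 == false
+//	_, ok1 := t1.Get([]byte("k")) // ok1 == true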
+func (t *Tree[T]) Insert(k []byte, v T) (*Tree[T], T, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree[T]) Delete(k []byte) (*Tree[T], T, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree[T]) DeletePrefix(k []byte) (*Tree[T], bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree[T]) Root() *Node[T] { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree[T]) Get(k []byte) (T, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go new file mode 100644 index 000000000..ffd2721c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator[T any] struct { + node *Node[T] + stack []edges[T] +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator[T]) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator[T]) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator[T]) recurseMin(n *Node[T]) *Node[T] { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. 
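+//
+// Pair SeekLowerBound with Next to range-scan: seek to the lower bound, then
+// call Next until it returns a key at or past the desired upper bound (see
+// the range scan example in the package README).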
+func (i *Iterator[T]) SeekLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + i.stack = []edges[T]{} + // i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := i.node + i.node = nil + search := key + + found := func(n *Node[T]) { + i.stack = append( + i.stack, + edges[T]{edge[T]{node: n}}, + ) + } + + findMin := func(n *Node[T]) { + n = i.recurseMin(n) + if n != nil { + found(n) + return + } + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp > 0 { + // Prefix is larger, that means the lower bound is greater than the search + // and from now on we need to follow the minimum path to the smallest + // leaf under this subtree. + findMin(n) + return + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no lower + // bound + i.node = nil + return + } + + // Prefix is equal, we are still heading for an exact match. If this is a + // leaf and an exact match we're done. + if n.leaf != nil && bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // Consume the search prefix if the current node has one. Note that this is + // safe because if n.prefix is longer than the search slice prefixCmp would + // have been > 0 above and the method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key, but the current node is not an exact + // match or not a leaf. That means that the leaf value if it exists, and + // all child nodes must be strictly greater, the smallest key in this + // subtree must be the lower bound. + findMin(n) + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + if lbNode == nil { + return + } + + // Create stack edges for the all strictly higher edges in this node. 
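+		// Edges at idx+1 and beyond sort after the lower bound edge, so the
+		// iterator will walk those subtrees after it finishes with lbNode.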
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator[T]) Next() ([]byte, T, bool) { + var zero T + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges[T]{{edge[T]{node: i.node}}} + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, zero, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go new file mode 100644 index 000000000..1be963922 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go @@ -0,0 +1,326 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn[T any] func(k []byte, v T) bool + +// leafNode is used to represent a value +type leafNode[T any] struct { + mutateCh chan struct{} + key []byte + val T +} + +// edge is used to represent an edge node +type edge[T any] struct { + label byte + node *Node[T] +} + +// Node is an immutable node in the radix tree +type Node[T any] struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode[T] + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges[T] +} + +func (n *Node[T]) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node[T]) addEdge(e edge[T]) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node[T]) replaceEdge(e edge[T]) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node[T]) getEdge(label byte) (int, *Node[T]) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node[T]) getLowerBoundEdge(label byte) (int, *Node[T]) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node[T]) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge[T]{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node[T]) GetWatch(k []byte) (<-chan struct{}, T, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + var zero T + return watch, zero, false +} + +func (n *Node[T]) Get(k []byte) (T, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. +func (n *Node[T]) LongestPrefix(k []byte) ([]byte, T, bool) { + var last *leafNode[T] + search := k + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + var zero T + return nil, zero, false +} + +// Minimum is used to return the minimum value in the tree +func (n *Node[T]) Minimum() ([]byte, T, bool) { + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + var zero T + return nil, zero, false +} + +// Maximum is used to return the maximum value in the tree +func (n *Node[T]) Maximum() ([]byte, T, bool) { + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node // bug? 
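+			// Not a bug: edges are kept sorted by label, so the last edge
+			// always leads toward the maximum key.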
+ continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + var zero T + return nil, zero, false +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node[T]) Iterator() *Iterator[T] { + return &Iterator[T]{node: n} +} + +// ReverseIterator is used to return an iterator at +// the given node to walk the tree backwards +func (n *Node[T]) ReverseIterator() *ReverseIterator[T] { + return NewReverseIterator(n) +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node[T]) PathIterator(path []byte) *PathIterator[T] { + return &PathIterator[T]{node: n, path: path} +} + +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node[T]) rawIterator() *rawIterator[T] { + iter := &rawIterator[T]{node: n} + iter.Next() + return iter +} + +// Walk is used to walk the tree +func (n *Node[T]) Walk(fn WalkFn[T]) { + recursiveWalk(n, fn) +} + +// WalkBackwards is used to walk the tree in reverse order +func (n *Node[T]) WalkBackwards(fn WalkFn[T]) { + reverseRecursiveWalk(n, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (n *Node[T]) WalkPrefix(prefix []byte, fn WalkFn[T]) { + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (n *Node[T]) WalkPath(path []byte, fn WalkFn[T]) { + i := n.PathIterator(path) + + for path, val, ok := i.Next(); ok; path, val, ok = i.Next() { + if fn(path, val) { + return + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk[T any](n *Node[T], fn WalkFn[T]) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk[T any](n *Node[T], fn WalkFn[T]) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go new file mode 100644 index 000000000..21942afc8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go @@ -0,0 +1,59 @@ +package iradix + +import "bytes" + +// PathIterator is used to iterate over a set of nodes from the root +// down to a specified path. 
This will iterate over the same values that +// the Node.WalkPath method will. +type PathIterator[T any] struct { + node *Node[T] + path []byte + done bool +} + +// Next returns the next node in order +func (i *PathIterator[T]) Next() ([]byte, T, bool) { + // This is mostly just an asynchronous implementation of the WalkPath + // method on the node. + var zero T + var leaf *leafNode[T] + + for leaf == nil && i.node != nil { + // visit the leaf values if any + if i.node.leaf != nil { + leaf = i.node.leaf + } + + i.iterate() + } + + if leaf != nil { + return leaf.key, leaf.val, true + } + + return nil, zero, false +} + +func (i *PathIterator[T]) iterate() { + // Check for key exhaustion + if len(i.path) == 0 { + i.node = nil + return + } + + // Look for an edge + _, i.node = i.node.getEdge(i.path[0]) + if i.node == nil { + return + } + + // Consume the search prefix + if bytes.HasPrefix(i.path, i.node.prefix) { + i.path = i.path[len(i.node.prefix):] + } else { + // there are no more nodes to iterate through so + // nil out the node to prevent returning results + // for subsequent calls to Next() + i.node = nil + } +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go new file mode 100644 index 000000000..dd84f089d --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator[T any] struct { + // node is the starting node in the tree for the iterator. + node *Node[T] + + // stack keeps track of edges in the frontier. + stack []rawStackEntry[T] + + // pos is the current position of the iterator. + pos *Node[T] + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry[T any] struct { + path string + edges edges[T] +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator[T]) Front() *Node[T] { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator[T]) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator[T]) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry[T]{ + { + edges: edges[T]{ + edge[T]{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. 
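+		// The accumulated path (parent path plus this node's prefix) is what
+		// Path() reports; slowNotify relies on it to line the snapshot and
+		// the new root up in key order.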
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry[T]{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go new file mode 100644 index 000000000..2a06cde7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go @@ -0,0 +1,240 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator[T any] struct { + i *Iterator[T] + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node[T]]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator[T any](n *Node[T]) *ReverseIterator[T] { + return &ReverseIterator[T]{ + i: &Iterator[T]{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator[T]) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator[T]) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator[T]) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges[T]{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node[T]]struct{}) + } + + found := func(n *Node[T]) { + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). 
+ ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + return + } + + if prefixCmp > 0 { + // Prefix is larger than search prefix, or there is no prefix but we've + // also exhausted the search key. Either way, that means there is no + // reverse lower bound since nothing comes before our current search + // prefix. + return + } + + // If this is a leaf, something needs to happen! Note that if it's a leaf + // and prefixCmp was zero (which it must be to get here) then the leaf value + // is either an exact match for the search, or it's lower. It can't be + // greater. + if n.isLeaf() { + + // Firstly, if it's an exact match, we're done! + if bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // It's not so this node's leaf value must be lower and could still be a + // valid contender for reverse lower bound. + + // If it has no children then we are also done. + if len(n.edges) == 0 { + // This leaf is the lower bound. + found(n) + return + } + + // Finally, this leaf is internal (has children) so we'll keep searching, + // but we need to add it to the iterator's stack since it has a leaf value + // that needs to be iterated over. It needs to be added to the stack + // before its children below as it comes first. + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + // We also need to mark it as expanded since we'll be adding any of its + // relevant children below and so don't want the iterator to re-add them + // on its way back up the stack. + ri.expandedParents[n] = struct{}{} + } + + // Consume the search prefix. Note that this is safe because if n.prefix is + // longer than the search slice prefixCmp would have been > 0 above and the + // method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key but we are not at a leaf. That means all + // children are greater than the search key so a reverse lower bound + // doesn't exist in this subtree. Note that there might still be one in + // the whole radix tree by following a different path somewhere further + // up. If that's the case then the iterator's stack will contain all the + // smaller nodes already and Previous will walk through them correctly. + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + + // From here, we need to update the stack with all values lower than + // the lower bound edge. 
Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
+		if lbNode == nil {
+			return
+		}
+
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator[T]) Previous() ([]byte, T, bool) {
+	// Initialize our stack if needed
+	if ri.i.stack == nil && ri.i.node != nil {
+		ri.i.stack = []edges[T]{
+			{
+				edge[T]{node: ri.i.node},
+			},
+		}
+	}
+
+	if ri.expandedParents == nil {
+		ri.expandedParents = make(map[*Node[T]]struct{})
+	}
+
+	for len(ri.i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(ri.i.stack)
+		last := ri.i.stack[n-1]
+		m := len(last)
+		elem := last[m-1].node
+
+		_, alreadyExpanded := ri.expandedParents[elem]
+
+		// If this is an internal node and we've not seen it already, we need to
+		// leave it in the stack so we can return its possible leaf value _after_
+		// we've recursed through all its children.
+		if len(elem.edges) > 0 && !alreadyExpanded {
+			// record that we've seen this node!
+			ri.expandedParents[elem] = struct{}{}
+			// push child edges onto stack and skip the rest of the loop to recurse
+			// into the largest one.
+			ri.i.stack = append(ri.i.stack, elem.edges)
+			continue
+		}
+
+		// Remove the node from the stack
+		if m > 1 {
+			ri.i.stack[n-1] = last[:m-1]
+		} else {
+			ri.i.stack = ri.i.stack[:n-1]
+		}
+		// We don't need this state any more as it's no longer in the stack so we
+		// won't visit it again
+		if alreadyExpanded {
+			delete(ri.expandedParents, elem)
+		}
+
+		// If this is a leaf, return it
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+
+		// it's not a leaf so keep walking the stack to find the previous leaf
+	}
+	var zero T
+	return nil, zero, false
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
new file mode 100644
index 000000000..0e5d580e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
@@ -0,0 +1,364 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6.
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 000000000..5cd74a034 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). 
+func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/golangci/gofmt/goimports/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list similarity index 97% rename from vendor/github.com/golangci/gofmt/goimports/LICENSE rename to vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list index 6a66aea5e..c4764e6b2 100644 --- a/vendor/github.com/golangci/gofmt/goimports/LICENSE +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -1,3 +1,5 @@ +This license applies to simplelru/list.go + Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 000000000..f69792388 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. 
+func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key + i++ + } + return keys +} + +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + +// Len returns the number of items in the cache. +func (c *LRU[K, V]) Len() int { + return c.evictList.Length() +} + +// Resize changes the cache size. +func (c *LRU[K, V]) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. 
+func (c *LRU[K, V]) removeOldest() {
+	if ent := c.evictList.Back(); ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
+	c.evictList.Remove(e)
+	delete(c.items, e.Key)
+	if c.onEvict != nil {
+		c.onEvict(e.Key, e.Value)
+	}
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
new file mode 100644
index 000000000..043b8bcc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package simplelru provides a simple LRU implementation based on the built-in container/list.
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache[K comparable, V any] interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key K, value V) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key K) (value V, ok bool)
+
+	// Checks if a key exists in cache without updating the recent-ness.
+	Contains(key K) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key K) (value V, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key K) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (K, V, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (K, V, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []K
+
+	// Values returns a slice of the values in the cache, from oldest to newest.
+	Values() []V
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clears all cache entries.
+	Purge()
+
+	// Resizes cache, returning number evicted
+	Resize(int) int
+}
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
deleted file mode 100644
index 15586a2b5..000000000
--- a/vendor/github.com/hashicorp/hcl/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-y.output
-
-# ignore intellij files
-.idea
-*.iml
-*.ipr
-*.iws
-
-*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
deleted file mode 100644
index cb63a3216..000000000
--- a/vendor/github.com/hashicorp/hcl/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.x
-  - tip
-
-branches:
-  only:
-    - master
-
-script: make test
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
deleted file mode 100644
index 84fd743f5..000000000
--- a/vendor/github.com/hashicorp/hcl/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-TEST?=./...
-
-default: test
-
-fmt: generate
-	go fmt ./...
-
-test: generate
-	go get -t ./...
-	go test $(TEST) $(TESTARGS)
-
-generate:
-	go generate ./...
- -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326d..000000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... 
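For context on the hashicorp/hcl API that the hunks above and below remove from vendor, here is a minimal usage sketch; it is illustrative only and not part of the patch. The Config struct and the configuration literal are hypothetical, while hcl.Decode and the `hcl` struct tag match the deleted decoder.go shown in the next hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target struct; HCL keys are mapped to fields
// through the "hcl" struct tag handled by the decoder being removed.
type Config struct {
	Name    string `hcl:"name"`
	Port    int    `hcl:"port"`
	Verbose bool   `hcl:"verbose"`
}

func main() {
	// HCL assigns values with `key = value`, as described in the README above.
	input := `
name    = "example"
port    = 8080
verbose = true
`
	var cfg Config
	// Decode parses the HCL string and fills cfg via reflection.
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}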
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index bed9ebbe1..000000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,729 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. 
- if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. 
- if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. - if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. 
- done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. 
-func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. - if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. 
-// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. 
In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. 
-func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381df..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcfb..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. 
-func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. 
- p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the 
underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. 
If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. 
-func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. 
If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. - // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. 
-func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. - comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. 
- if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". -// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. 
-package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe3..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. 
Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' 
|| (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start && ch != eof { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa2..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664e..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f072..000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index fe3f0f095..000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c292..000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4c..000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/jjti/go-spancheck/.gitignore b/vendor/github.com/jjti/go-spancheck/.gitignore index 1f83be414..04b66d911 100644 --- a/vendor/github.com/jjti/go-spancheck/.gitignore +++ b/vendor/github.com/jjti/go-spancheck/.gitignore @@ -17,3 +17,5 @@ # Dependency directories (remove the comment below to include it) # vendor/ src/ + +.vscode \ No newline at end of file diff --git a/vendor/github.com/jjti/go-spancheck/.golangci.yml b/vendor/github.com/jjti/go-spancheck/.golangci.yml index 15d8513d6..5d6ab1287 100644 --- a/vendor/github.com/jjti/go-spancheck/.golangci.yml +++ b/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -17,7 +17,6 @@ linters: - errcheck - errname - errorlint - - exhaustive # checks exhaustiveness of enum switch statements - exportloopref # checks for pointers to enclosing loop variables - gci - gochecknoinits # checks that no init functions are present in Go code @@ -59,12 +58,6 @@ linters-settings: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. 
- prefix(github.com/jjti) - exhaustive: - # Program elements to check for exhaustiveness. - # Default: [ switch ] - check: - - switch - - map gocritic: settings: captLocal: diff --git a/vendor/github.com/jjti/go-spancheck/go.work b/vendor/github.com/jjti/go-spancheck/go.work index 7d0a87b9e..ff04ca17e 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work +++ b/vendor/github.com/jjti/go-spancheck/go.work @@ -1,4 +1,4 @@ -go 1.20 +go 1.22.1 use ( . diff --git a/vendor/github.com/jjti/go-spancheck/go.work.sum b/vendor/github.com/jjti/go-spancheck/go.work.sum index 04eadf2c5..c96d590d6 100644 --- a/vendor/github.com/jjti/go-spancheck/go.work.sum +++ b/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -1,4 +1,11 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= diff --git a/vendor/github.com/jjti/go-spancheck/spancheck.go b/vendor/github.com/jjti/go-spancheck/spancheck.go index 8fc7945c6..49e581728 100644 --- a/vendor/github.com/jjti/go-spancheck/spancheck.go +++ b/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -309,6 +309,11 @@ outer: } seen[b] = true + // Skip successors that are not nested within this current block. + if _, ok := nestedBlockTypes[b.Kind]; !ok { + continue + } + // Prune the search if the block uses v. if blockUses(pass, b) { continue @@ -330,6 +335,21 @@ outer: return search(defBlock.Succs) } +var nestedBlockTypes = map[cfg.BlockKind]struct{}{ + cfg.KindBody: {}, + cfg.KindForBody: {}, + cfg.KindForLoop: {}, + cfg.KindIfElse: {}, + cfg.KindIfThen: {}, + cfg.KindLabel: {}, + cfg.KindRangeBody: {}, + cfg.KindRangeLoop: {}, + cfg.KindSelectCaseBody: {}, + cfg.KindSelectAfterCase: {}, + cfg.KindSwitchCaseBody: {}, + cfg.KindSwitchNextCase: {}, +} + // usesCall reports whether stmts contain a use of the selName call on variable v. func usesCall( pass *analysis.Pass, @@ -340,10 +360,12 @@ func usesCall( startSpanMatchers []spanStartMatcher, depth int, ) bool { - if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just one level deep check. + if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just two levels deep. 
return false } + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + found, reAssigned := false, false for _, subStmt := range stmts { stack := []ast.Node{} @@ -351,7 +373,6 @@ func usesCall( switch n := n.(type) { case *ast.FuncLit: if len(stack) > 0 { - cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) g := cfgs.FuncLit(n) if g != nil && len(g.Blocks) > 0 { return usesCall(pass, g.Blocks[0].Nodes, sv, selName, ignoreCheckSig, startSpanMatchers, depth+1) @@ -367,6 +388,32 @@ func usesCall( return false } } + case *ast.DeferStmt: + if n.Call == nil { + break + } + + f, ok := n.Call.Fun.(*ast.FuncLit) + if !ok { + break + } + + if g := cfgs.FuncLit(f); g != nil && len(g.Blocks) > 0 { + for _, b := range g.Blocks { + if usesCall( + pass, + b.Nodes, + sv, + selName, + ignoreCheckSig, + startSpanMatchers, + depth+1, + ) { + found = true + return false + } + } + } case nil: if len(stack) > 0 { stack = stack[:len(stack)-1] // pop diff --git a/vendor/github.com/julz/importas/Makefile b/vendor/github.com/julz/importas/Makefile new file mode 100644 index 000000000..e9838b43b --- /dev/null +++ b/vendor/github.com/julz/importas/Makefile @@ -0,0 +1,17 @@ +# default task since it's first +.PHONY: all +all: build test + +BINARY = importas +$(BINARY): *.go go.mod go.sum + go build -o $(BINARY) + +.PHONY: build +build: $(BINARY) ## Build binary + +.PHONY: test +test: build ## Unit test + go test -v ./... + +install: ## Install binary + go install diff --git a/vendor/github.com/julz/importas/analyzer.go b/vendor/github.com/julz/importas/analyzer.go index f19653478..25bc09b82 100644 --- a/vendor/github.com/julz/importas/analyzer.go +++ b/vendor/github.com/julz/importas/analyzer.go @@ -13,7 +13,7 @@ import ( ) var config = &Config{ - RequiredAlias: make(map[string]string), + RequiredAlias: make([][]string, 0), } var Analyzer = &analysis.Analyzer{ @@ -129,11 +129,19 @@ func findEdits(node ast.Node, uses map[*ast.Ident]types.Object, importPath, orig // skip identifiers pointing to a different import statement. continue } + pos := use.Pos() + end := use.End() + replacement := packageReplacement + + if packageReplacement == "." 
{ + replacement = "" + end = end + 1 + } result = append(result, analysis.TextEdit{ - Pos: use.Pos(), - End: use.End(), - NewText: []byte(packageReplacement), + Pos: pos, + End: end, + NewText: []byte(replacement), }) } diff --git a/vendor/github.com/julz/importas/config.go b/vendor/github.com/julz/importas/config.go index 8c9c76d91..58be86c75 100644 --- a/vendor/github.com/julz/importas/config.go +++ b/vendor/github.com/julz/importas/config.go @@ -4,18 +4,26 @@ import ( "errors" "fmt" "regexp" + "sync" ) type Config struct { - RequiredAlias map[string]string + RequiredAlias aliasList Rules []*Rule DisallowUnaliased bool DisallowExtraAliases bool + muRules sync.Mutex } func (c *Config) CompileRegexp() error { + c.muRules.Lock() + defer c.muRules.Unlock() + if c.Rules != nil { + return nil + } rules := make([]*Rule, 0, len(c.RequiredAlias)) - for path, alias := range c.RequiredAlias { + for _, aliases := range c.RequiredAlias { + path, alias := aliases[0], aliases[1] reg, err := regexp.Compile(fmt.Sprintf("^%s$", path)) if err != nil { return err @@ -26,13 +34,15 @@ func (c *Config) CompileRegexp() error { Alias: alias, }) } - c.Rules = rules return nil } func (c *Config) findRule(path string) *Rule { - for _, rule := range c.Rules { + c.muRules.Lock() + rules := c.Rules + c.muRules.Unlock() + for _, rule := range rules { if rule.Regexp.MatchString(path) { return rule } diff --git a/vendor/github.com/julz/importas/flags.go b/vendor/github.com/julz/importas/flags.go index f8107104a..cc3f1f3aa 100644 --- a/vendor/github.com/julz/importas/flags.go +++ b/vendor/github.com/julz/importas/flags.go @@ -7,26 +7,27 @@ import ( "strings" ) +var errWrongAlias = errors.New("import flag must be of form path:alias") + func flags(config *Config) flag.FlagSet { fs := flag.FlagSet{} - fs.Var(stringMap(config.RequiredAlias), "alias", "required import alias in form path:alias") + fs.Var(&config.RequiredAlias, "alias", "required import alias in form path:alias") fs.BoolVar(&config.DisallowUnaliased, "no-unaliased", false, "do not allow unaliased imports of aliased packages") fs.BoolVar(&config.DisallowExtraAliases, "no-extra-aliases", false, "do not allow non-required aliases") return fs } -type stringMap map[string]string +type aliasList [][]string -func (v stringMap) Set(val string) error { - spl := strings.SplitN(val, ":", 2) - if len(spl) != 2 { - return errors.New("import flag must be of form path:alias") +func (v *aliasList) Set(val string) error { + lastColon := strings.LastIndex(val, ":") + if lastColon <= 1 { + return errWrongAlias } - - v[spl[0]] = spl[1] + *v = append(*v, []string{val[:lastColon], val[lastColon+1:]}) return nil } -func (v stringMap) String() string { - return fmt.Sprintf("%v", (map[string]string)(v)) +func (v *aliasList) String() string { + return fmt.Sprintf("%v", ([][]string)(*v)) } diff --git a/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go b/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go index 79dc6afcc..00c8e0e3d 100644 --- a/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go +++ b/vendor/github.com/karamaru-alpha/copyloopvar/copyloopvar.go @@ -15,7 +15,7 @@ var checkAlias bool func NewAnalyzer() *analysis.Analyzer { analyzer := &analysis.Analyzer{ Name: "copyloopvar", - Doc: "copyloopvar is a linter detects places where loop variables are copied", + Doc: "a linter detects places where loop variables are copied", Run: run, Requires: []*analysis.Analyzer{ inspect.Analyzer, @@ -77,10 +77,8 @@ func checkRangeStmt(pass *analysis.Pass, rangeStmt 
*ast.RangeStmt) { continue } } - pass.Report(analysis.Diagnostic{ - Pos: assignStmt.Pos(), - Message: fmt.Sprintf(`The copy of the 'for' variable "%s" can be deleted (Go 1.22+)`, right.Name), - }) + + report(pass, assignStmt, right, i) } } } @@ -124,10 +122,40 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { continue } } - pass.Report(analysis.Diagnostic{ - Pos: assignStmt.Pos(), - Message: fmt.Sprintf(`The copy of the 'for' variable "%s" can be deleted (Go 1.22+)`, right.Name), - }) + + report(pass, assignStmt, right, i) } } } + +func report(pass *analysis.Pass, assignStmt *ast.AssignStmt, right *ast.Ident, i int) { + diagnostic := analysis.Diagnostic{ + Pos: assignStmt.Pos(), + Message: fmt.Sprintf(`The copy of the 'for' variable "%s" can be deleted (Go 1.22+)`, right.Name), + } + + if i == 0 && isSimpleAssignStmt(assignStmt, right) { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: assignStmt.Pos(), + End: assignStmt.End(), + NewText: nil, + }}, + }) + } + + pass.Report(diagnostic) +} + +func isSimpleAssignStmt(assignStmt *ast.AssignStmt, rhs *ast.Ident) bool { + if len(assignStmt.Lhs) != 1 { + return false + } + + lhs, ok := assignStmt.Lhs[0].(*ast.Ident) + if !ok { + return false + } + + return rhs.Name == lhs.Name +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go index a7a2a30bf..325aeec98 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go @@ -23,7 +23,9 @@ func init() { } var ( - // ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files + // ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files. + // + // Deprecated: this error is no longer returned by errcheck.LoadPackages. ErrNoGoFiles = errors.New("package contains no go source files") ) @@ -162,7 +164,7 @@ var loadPackages = func(cfg *packages.Config, paths ...string) ([]*packages.Pack // LoadPackages loads all the packages in all the paths provided. It uses the // exclusions and build tags provided to by the user when loading the packages. 
func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) { - buildFlags := []string{fmtTags(c.Tags)} + buildFlags := []string{fmt.Sprintf("-tags=%s", strings.Join(c.Tags, ","))} if c.Mod != "" { buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod)) } diff --git a/vendor/github.com/kisielk/errcheck/errcheck/tags.go b/vendor/github.com/kisielk/errcheck/errcheck/tags.go deleted file mode 100644 index 7b423ca69..000000000 --- a/vendor/github.com/kisielk/errcheck/errcheck/tags.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.13 - -package errcheck - -import ( - "fmt" - "strings" -) - -func fmtTags(tags []string) string { - return fmt.Sprintf("-tags=%s", strings.Join(tags, ",")) -} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go b/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go deleted file mode 100644 index 2f534f40a..000000000 --- a/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.11 -// +build !go1.13 - -package errcheck - -import ( - "fmt" - "strings" -) - -func fmtTags(tags []string) string { - return fmt.Sprintf("-tags=%s", strings.Join(tags, " ")) -} diff --git a/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go index 62696351a..c62909a87 100644 --- a/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go +++ b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go @@ -727,6 +727,14 @@ func (r *runner) getFunction(instr ssa.Instruction) (f *ssa.Function) { } func (r *runner) isCtxType(tp types.Type) bool { + if p, ok := tp.(*types.Pointer); ok { + // opaqueType is not exposed and lead to unreachable error. + // Related to https://github.com/golang/tools/blob/63229bc79404d8cf2fe4e88ad569168fe251d993/go/ssa/builder.go#L107 + if p.Elem().String() == "deferStack" { + return false + } + } + return types.Identical(tp, r.ctxTyp) || types.Identical(tp, r.ctxPTyp) } diff --git a/vendor/github.com/kyoh86/exportloopref/.golangci.yml b/vendor/github.com/kyoh86/exportloopref/.golangci.yml deleted file mode 100644 index e876057f3..000000000 --- a/vendor/github.com/kyoh86/exportloopref/.golangci.yml +++ /dev/null @@ -1,4 +0,0 @@ -linters: - enable: - - unparam - - exportloopref diff --git a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml deleted file mode 100644 index 95d44aaac..000000000 --- a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml +++ /dev/null @@ -1,51 +0,0 @@ -# yaml-language-server: $schema=https://goreleaser.com/static/schema.json - -project_name: exportloopref -builds: - - id: default - goos: - - linux - - darwin - - windows - goarch: - - amd64 - - arm64 - - "386" - main: ./cmd/exportloopref - binary: exportloopref -brews: - - install: | - bin.install "exportloopref" - tap: - owner: kyoh86 - name: homebrew-tap - folder: Formula - homepage: https://github.com/kyoh86/exportloopref - description: An analyzer that finds exporting pointers for loop variables. - license: MIT -nfpms: - - builds: - - default - maintainer: kyoh86 - homepage: https://github.com/kyoh86/exportloopref - description: An analyzer that finds exporting pointers for loop variables. 
- license: MIT - formats: - - apk - - deb - - rpm -archives: - - id: gzip - format: tar.gz - format_overrides: - - goos: windows - format: zip - files: - - licence* - - LICENCE* - - license* - - LICENSE* - - readme* - - README* - - changelog* - - CHANGELOG* diff --git a/vendor/github.com/kyoh86/exportloopref/LICENSE b/vendor/github.com/kyoh86/exportloopref/LICENSE deleted file mode 100644 index 7ac9dba4a..000000000 --- a/vendor/github.com/kyoh86/exportloopref/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 kyoh86 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE -OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kyoh86/exportloopref/Makefile b/vendor/github.com/kyoh86/exportloopref/Makefile deleted file mode 100644 index 4d3ef22f7..000000000 --- a/vendor/github.com/kyoh86/exportloopref/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -.PHONY: gen lint test install man - -VERSION := `git vertag get` -COMMIT := `git rev-parse HEAD` - -gen: - go generate ./... - -lint: gen - golangci-lint run - -test: lint - go test -v --race ./... - -install: test - go install -a -ldflags "-X=main.version=$(VERSION) -X=main.commit=$(COMMIT)" ./... diff --git a/vendor/github.com/kyoh86/exportloopref/README.md b/vendor/github.com/kyoh86/exportloopref/README.md deleted file mode 100644 index 0f581ffce..000000000 --- a/vendor/github.com/kyoh86/exportloopref/README.md +++ /dev/null @@ -1,223 +0,0 @@ -# exportloopref - -An analyzer that finds exporting pointers for loop variables. -![](https://repository-images.githubusercontent.com/256768552/a1c5bb80-dd73-11eb-9453-e520f517e730) -Pin them all! - -[![PkgGoDev](https://pkg.go.dev/badge/kyoh86/exportloopref)](https://pkg.go.dev/kyoh86/exportloopref) -[![Go Report Card](https://goreportcard.com/badge/github.com/kyoh86/exportloopref)](https://goreportcard.com/report/github.com/kyoh86/exportloopref) -[![Coverage Status](https://img.shields.io/codecov/c/github/kyoh86/exportloopref.svg)](https://codecov.io/gh/kyoh86/exportloopref) -[![Release](https://github.com/kyoh86/exportloopref/workflows/Release/badge.svg)](https://github.com/kyoh86/exportloopref/releases) - -## What's this? 
- -Sample problem code from: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/simple/simple.go - -```go -package main - -func main() { - var intArray [4]*int - var intSlice []*int - var intRef *int - var intStr struct{ x *int } - - println("loop expecting 10, 11, 12, 13") - for i, p := range []int{10, 11, 12, 13} { - printp(&p) // not a diagnostic - intSlice = append(intSlice, &p) // want "exporting a pointer for the loop variable p" - intArray[i] = &p // want "exporting a pointer for the loop variable p" - if i%2 == 0 { - intRef = &p // want "exporting a pointer for the loop variable p" - intStr.x = &p // want "exporting a pointer for the loop variable p" - } - var vStr struct{ x *int } - var vArray [4]*int - var v *int - if i%2 == 0 { - v = &p // not a diagnostic (x is local variable) - vArray[1] = &p // not a diagnostic (x is local variable) - vStr.x = &p - } - _ = v - } - - println(`slice expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) - for _, p := range intSlice { - printp(p) - } - println(`array expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) - for _, p := range intArray { - printp(p) - } - println(`captured value expecting "12" but "13"`) - printp(intRef) -} - -func printp(p *int) { - println(*p) -} -``` - -In Go, the `p` variable in the above loops is actually a single variable. -So in many case (like the above), using it makes for us annoying bugs. - -You can find them with `exportloopref`, and fix it. - -```go -package main - -func main() { - var intArray [4]*int - var intSlice []*int - var intRef *int - var intStr struct{ x *int } - - println("loop expecting 10, 11, 12, 13") - for i, p := range []int{10, 11, 12, 13} { - p := p // FIX variable into the local variable - printp(&p) - intSlice = append(intSlice, &p) - intArray[i] = &p - if i%2 == 0 { - intRef = &p - intStr.x = &p - } - var vStr struct{ x *int } - var vArray [4]*int - var v *int - if i%2 == 0 { - v = &p - vArray[1] = &p - vStr.x = &p - } - _ = v - } - - println(`slice expecting "10, 11, 12, 13"`) - for _, p := range intSlice { - printp(p) - } - println(`array expecting "10, 11, 12, 13"`) - for _, p := range intArray { - printp(p) - } - println(`captured value expecting "12"`) - printp(intRef) -} - -func printp(p *int) { - println(*p) -} -``` - -ref: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/fixed/fixed.go - -## Sensing policy - -I want to make exportloopref as accurately as possible. -So some cases of lints will be false-negative. - -e.g. - -```go -var s Foo -for _, p := range []int{10, 11, 12, 13} { - s.Bar(&p) // If s stores the pointer, it will be bug. -} -``` - -If you want to report all of lints (with some false-positives), -you should use [looppointer](https://github.com/kyoh86/looppointer). - -### Known false negatives - -Case 1: pass the pointer to function to export. - -Case 2: pass the pointer to local variable, and export it. 
- -```go -package main - -type List []*int - -func (l *List) AppendP(p *int) { - *l = append(*l, p) -} - -func main() { - var slice []*int - list := List{} - - println("loop expect exporting 10, 11, 12, 13") - for _, v := range []int{10, 11, 12, 13} { - list.AppendP(&v) // Case 1: wanted "exporting a pointer for the loop variable v", but cannot be found - - p := &v // p is the local variable - slice = append(slice, p) // Case 2: wanted "exporting a pointer for the loop variable v", but cannot be found - } - - println(`slice expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) - for _, p := range slice { - printp(p) - } - println(`array expecting "10, 11, 12, 13" but "13, 13, 13, 13"`) - for _, p := range ([]*int)(list) { - printp(p) - } -} - -func printp(p *int) { - println(*p) -} -``` - -## Install - -go: - -```console -$ go get github.com/kyoh86/exportloopref/cmd/exportloopref -``` - -[homebrew](https://brew.sh/): - -```console -$ brew install kyoh86/tap/exportloopref -``` - -[gordon](https://github.com/kyoh86/gordon): - -```console -$ gordon install kyoh86/exportloopref -``` - -## Usage - -``` -exportloopref [-flag] [package] -``` - -### Flags - -| Flag | Description | -| --- | --- | -| -V | print version and exit | -| -all | no effect (deprecated) | -| -c int | display offending line with this many lines of context (default -1) | -| -cpuprofile string | write CPU profile to this file | -| -debug string | debug flags, any subset of "fpstv" | -| -fix | apply all suggested fixes | -| -flags | print analyzer flags in JSON | -| -json | emit JSON output | -| -memprofile string | write memory profile to this file | -| -source | no effect (deprecated) | -| -tags string | no effect (deprecated) | -| -trace string | write trace log to this file | -| -v | no effect (deprecated) | - -# LICENSE - -[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg)](http://www.opensource.org/licenses/MIT) - -This is distributed under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/kyoh86/exportloopref/exportloopref.go b/vendor/github.com/kyoh86/exportloopref/exportloopref.go deleted file mode 100644 index d071d5c35..000000000 --- a/vendor/github.com/kyoh86/exportloopref/exportloopref.go +++ /dev/null @@ -1,334 +0,0 @@ -package exportloopref - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -var Analyzer = &analysis.Analyzer{ - Name: "exportloopref", - Doc: "checks for pointers to enclosing loop variables", - Run: run, - RunDespiteErrors: true, - Requires: []*analysis.Analyzer{inspect.Analyzer}, -} - -func run(pass *analysis.Pass) (interface{}, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - search := &Searcher{ - LoopVars: map[token.Pos]struct{}{}, - LocalVars: map[token.Pos]map[token.Pos]struct{}{}, - Pass: pass, - } - - nodeFilter := []ast.Node{ - (*ast.RangeStmt)(nil), - (*ast.ForStmt)(nil), - (*ast.DeclStmt)(nil), - (*ast.AssignStmt)(nil), - (*ast.UnaryExpr)(nil), - } - - inspect.WithStack(nodeFilter, search.CheckAndReport) - - return nil, nil -} - -type Searcher struct { - // LoopVars is positions that loop-variables are declared like below. - // - for , := range ... - // - for := ; ; - LoopVars map[token.Pos]struct{} - // LocalVars is positions of loops and the variables declared in them. - // Use this to determine if a point assignment is an export outside the loop. 
- LocalVars map[token.Pos]map[token.Pos]struct{} - - Pass *analysis.Pass -} - -// CheckAndReport inspects each node with stack. -// It is implemented as the I/F of the "golang.org/x/tools/go/analysis/passes/inspect".Analysis.WithStack. -func (s *Searcher) CheckAndReport(n ast.Node, push bool, stack []ast.Node) bool { - id, insert, digg := s.Check(n, stack) - if id == nil { - // no prob. - return digg - } - - // suggests fix - var suggest []analysis.SuggestedFix - if insert != token.NoPos { - suggest = []analysis.SuggestedFix{{ - Message: fmt.Sprintf("loop variable %s should be pinned", id.Name), - TextEdits: []analysis.TextEdit{{ - Pos: insert, - End: insert, - NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), - }}, - }} - } - - // report a diagnostic - d := analysis.Diagnostic{Pos: id.Pos(), - End: id.End(), - Message: fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name), - Category: "exportloopref", - SuggestedFixes: suggest, - } - s.Pass.Report(d) - return digg -} - -// Check each node and stack, whether it exports loop variables or not. -// Finding export, report the *ast.Ident of exported loop variable, -// and token.Pos to insert assignment to fix the diagnostic. -func (s *Searcher) Check(n ast.Node, stack []ast.Node) (loopVar *ast.Ident, insertPos token.Pos, digg bool) { - switch typed := n.(type) { - case *ast.RangeStmt: - s.parseRangeStmt(typed) - case *ast.ForStmt: - s.parseForStmt(typed) - case *ast.DeclStmt: - s.parseDeclStmt(typed, stack) - case *ast.AssignStmt: - s.parseAssignStmt(typed, stack) - - case *ast.UnaryExpr: - return s.checkUnaryExpr(typed, stack) - } - return nil, token.NoPos, true -} - -// parseRangeStmt will check range statement (i.e. `for , := range ...`), -// and collect positions of and . -func (s *Searcher) parseRangeStmt(n *ast.RangeStmt) { - s.storeLoopVars(n.Key) - s.storeLoopVars(n.Value) -} - -// parseForStmt will check for statement (i.e. `for := ; ; `), -// and collect positions of . -func (s *Searcher) parseForStmt(n *ast.ForStmt) { - switch post := n.Post.(type) { - case *ast.AssignStmt: - // e.g. for p = head; p != nil; p = p.next - for _, lhs := range post.Lhs { - s.storeLoopVars(lhs) - } - case *ast.IncDecStmt: - // e.g. for i := 0; i < n; i++ - s.storeLoopVars(post.X) - } -} - -func (s *Searcher) storeLoopVars(expr ast.Expr) { - if id, ok := expr.(*ast.Ident); ok { - s.LoopVars[id.Pos()] = struct{}{} - } -} - -// parseDeclStmt will parse declaring statement (i.e. `var`, `type`, `const`), -// and store the position if it is "var" declaration and is in any loop. -func (s *Searcher) parseDeclStmt(n *ast.DeclStmt, stack []ast.Node) { - genDecl, ok := n.Decl.(*ast.GenDecl) - if !ok { - // (dead branch) - // if the Decl is not GenDecl (i.e. `var`, `type` or `const` statement), it is ignored - return - } - if genDecl.Tok != token.VAR { - // if the Decl is not `var` (may be `type` or `const`), it is ignored - return - } - - loop, _ := s.innermostLoop(stack) - if loop == nil { - return - } - - // Register declared variables - for _, spec := range genDecl.Specs { - for _, name := range spec.(*ast.ValueSpec).Names { - s.storeLocalVar(loop, name) - } - } -} - -// parseDeclStmt will parse assignment statement (i.e. ` = `), -// and store the position if it is . 
-func (s *Searcher) parseAssignStmt(n *ast.AssignStmt, stack []ast.Node) { - if n.Tok != token.DEFINE { - // if the statement is simple assignment (without definement), it is ignored - return - } - - loop, _ := s.innermostLoop(stack) - if loop == nil { - return - } - - // Find statements declaring local variable - for _, h := range n.Lhs { - s.storeLocalVar(loop, h) - } -} - -func (s *Searcher) storeLocalVar(loop ast.Node, expr ast.Expr) { - loopPos := loop.Pos() - id, ok := expr.(*ast.Ident) - if !ok { - return - } - vars, ok := s.LocalVars[loopPos] - if !ok { - vars = map[token.Pos]struct{}{} - } - vars[id.Obj.Pos()] = struct{}{} - s.LocalVars[loopPos] = vars -} - -func insertionPosition(block *ast.BlockStmt) token.Pos { - if len(block.List) > 0 { - return block.List[0].Pos() - } - return token.NoPos -} - -func (s *Searcher) innermostLoop(stack []ast.Node) (ast.Node, token.Pos) { - for i := len(stack) - 1; i >= 0; i-- { - switch typed := stack[i].(type) { - case *ast.RangeStmt: - return typed, insertionPosition(typed.Body) - case *ast.ForStmt: - return typed, insertionPosition(typed.Body) - } - } - return nil, token.NoPos -} - -// checkUnaryExpr check unary expression (i.e. like `-x`, `*p` or `&v`) and stack. -// THIS IS THE ESSENTIAL PART OF THIS PARSER. -func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Ident, token.Pos, bool) { - if n.Op != token.AND { - return nil, token.NoPos, true - } - - loop, insert := s.innermostLoop(stack) - if loop == nil { - return nil, token.NoPos, true - } - - // Get identity of the referred item - id := s.getIdentity(n.X) - if id == nil { - return nil, token.NoPos, true - } - - // If the identity is not the loop statement variable, - // it will not be reported. - if _, isDecl := s.LoopVars[id.Obj.Pos()]; !isDecl { - return nil, token.NoPos, true - } - - // check stack append(), []X{}, map[Type]X{}, Struct{}, &Struct{}, X.(Type), (X) - // in the = - var mayRHPos token.Pos - for i := len(stack) - 2; i >= 0; i-- { - switch typed := stack[i].(type) { - case (*ast.UnaryExpr): - // noop - case (*ast.CompositeLit): - // noop - case (*ast.KeyValueExpr): - // noop - case (*ast.CallExpr): - fun, ok := typed.Fun.(*ast.Ident) - if !ok { - return nil, token.NoPos, false // it's calling a function other of `append`. It cannot be checked - } - - if fun.Name != "append" { - return nil, token.NoPos, false // it's calling a function other of `append`. It cannot be checked - } - - case (*ast.AssignStmt): - if len(typed.Rhs) != len(typed.Lhs) { - return nil, token.NoPos, false // dead logic - } - - // search x where Rhs[x].Pos() == mayRHPos - var index int - for ri, rh := range typed.Rhs { - if rh.Pos() == mayRHPos { - index = ri - break - } - } - - // check Lhs[x] is not local variable - lh := typed.Lhs[index] - isVar := s.isVar(loop, lh) - if !isVar { - return id, insert, false - } - - return nil, token.NoPos, true - default: - // Other statement is not able to be checked. 
- return nil, token.NoPos, false - } - - // memory an expr that may be right-hand in the AssignStmt - mayRHPos = stack[i].Pos() - } - return nil, token.NoPos, true -} - -func (s *Searcher) isVar(loop ast.Node, expr ast.Expr) bool { - vars := s.LocalVars[loop.Pos()] // map[token.Pos]struct{} - if vars == nil { - return false - } - switch typed := expr.(type) { - case (*ast.Ident): - if typed.Obj == nil { - return false // global var in another file (ref: #13) - } - _, isVar := vars[typed.Obj.Pos()] - return isVar - case (*ast.IndexExpr): // like X[Y], check X - return s.isVar(loop, typed.X) - case (*ast.SelectorExpr): // like X.Y, check X - return s.isVar(loop, typed.X) - } - return false -} - -// Get variable identity -func (s *Searcher) getIdentity(expr ast.Expr) *ast.Ident { - switch typed := expr.(type) { - case *ast.SelectorExpr: - // Ignore if the parent is pointer ref (fix for #2) - if _, ok := s.Pass.TypesInfo.Types[typed.X].Type.(*types.Pointer); ok { - return nil - } - - // Get parent identity; i.e. `a.b` of the `a.b.c`. - return s.getIdentity(typed.X) - - case *ast.Ident: - // Get simple identity; i.e. `a` of the `a`. - if typed.Obj == nil { - return nil - } - return typed - } - return nil -} diff --git a/vendor/github.com/ldez/exptostd/.gitignore b/vendor/github.com/ldez/exptostd/.gitignore new file mode 100644 index 000000000..ec3a60398 --- /dev/null +++ b/vendor/github.com/ldez/exptostd/.gitignore @@ -0,0 +1,2 @@ +/exptostd +.idea diff --git a/vendor/github.com/ldez/exptostd/.golangci.yml b/vendor/github.com/ldez/exptostd/.golangci.yml new file mode 100644 index 000000000..e615d3e5c --- /dev/null +++ b/vendor/github.com/ldez/exptostd/.golangci.yml @@ -0,0 +1,83 @@ +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - nlreturn + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - varnamelen + - nilnil + - errchkjson + - nonamedreturns + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 20 + goconst: + min-len: 5 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + wsl: + force-case-trailing-whitespace: 1 + allow-trailing-comment: true + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + nolintlint: + require-specific: true + require-explanation: true + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + +output: + show-stats: true + sort-results: true + sort-order: + - linter + - file + +run: + timeout: 5m diff --git a/vendor/github.com/ldez/exptostd/LICENSE b/vendor/github.com/ldez/exptostd/LICENSE new file mode 100644 index 000000000..c1bf0c328 --- /dev/null +++ b/vendor/github.com/ldez/exptostd/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2024 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ldez/exptostd/Makefile b/vendor/github.com/ldez/exptostd/Makefile new file mode 100644 index 000000000..ad7275149 --- /dev/null +++ b/vendor/github.com/ldez/exptostd/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... 
+ +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/exptostd/ diff --git a/vendor/github.com/ldez/exptostd/exptostd.go b/vendor/github.com/ldez/exptostd/exptostd.go new file mode 100644 index 000000000..cf6c5e842 --- /dev/null +++ b/vendor/github.com/ldez/exptostd/exptostd.go @@ -0,0 +1,475 @@ +// Package exptostd It is an analyzer that detects functions from golang.org/x/exp/ that can be replaced by std functions. +package exptostd + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/printer" + "go/token" + "go/types" + "os" + "slices" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + pkgExpMaps = "golang.org/x/exp/maps" + pkgExpSlices = "golang.org/x/exp/slices" + pkgExpConstraints = "golang.org/x/exp/constraints" +) + +const ( + pkgMaps = "maps" + pkgSlices = "slices" + pkgComp = "cmp" +) + +const ( + go123 = 123 + go121 = 121 + goDevel = 666 +) + +// Result is step analysis results. +type Result struct { + shouldKeepImport bool + Diagnostics []analysis.Diagnostic +} + +type stdReplacement[T ast.Expr] struct { + MinGo int + Text string + Suggested func(callExpr T) (analysis.SuggestedFix, error) +} + +type analyzer struct { + mapsPkgReplacements map[string]stdReplacement[*ast.CallExpr] + slicesPkgReplacements map[string]stdReplacement[*ast.CallExpr] + constraintsPkgReplacements map[string]stdReplacement[*ast.SelectorExpr] + + skipGoVersionDetection bool + goVersion int +} + +// NewAnalyzer create a new Analyzer. +func NewAnalyzer() *analysis.Analyzer { + _, skip := os.LookupEnv("EXPTOSTD_SKIP_GO_VERSION_CHECK") + + l := &analyzer{ + skipGoVersionDetection: skip, + mapsPkgReplacements: map[string]stdReplacement[*ast.CallExpr]{ + "Keys": {MinGo: go123, Text: "slices.AppendSeq(make([]T, 0, len(data)), maps.Keys(data))", Suggested: suggestedFixForKeysOrValues}, + "Values": {MinGo: go123, Text: "slices.AppendSeq(make([]T, 0, len(data)), maps.Values(data))", Suggested: suggestedFixForKeysOrValues}, + "Equal": {MinGo: go121, Text: "maps.Equal()"}, + "EqualFunc": {MinGo: go121, Text: "maps.EqualFunc()"}, + "Clone": {MinGo: go121, Text: "maps.Clone()"}, + "Copy": {MinGo: go121, Text: "maps.Copy()"}, + "DeleteFunc": {MinGo: go121, Text: "maps.DeleteFunc()"}, + "Clear": {MinGo: go121, Text: "clear()", Suggested: suggestedFixForClear}, + }, + slicesPkgReplacements: map[string]stdReplacement[*ast.CallExpr]{ + "Equal": {MinGo: go121, Text: "slices.Equal()"}, + "EqualFunc": {MinGo: go121, Text: "slices.EqualFunc()"}, + "Compare": {MinGo: go121, Text: "slices.Compare()"}, + "CompareFunc": {MinGo: go121, Text: "slices.CompareFunc()"}, + "Index": {MinGo: go121, Text: "slices.Index()"}, + "IndexFunc": {MinGo: go121, Text: "slices.IndexFunc()"}, + "Contains": {MinGo: go121, Text: "slices.Contains()"}, + "ContainsFunc": {MinGo: go121, Text: "slices.ContainsFunc()"}, + "Insert": {MinGo: go121, Text: "slices.Insert()"}, + "Delete": {MinGo: go121, Text: "slices.Delete()"}, + "DeleteFunc": {MinGo: go121, Text: "slices.DeleteFunc()"}, + "Replace": {MinGo: go121, Text: "slices.Replace()"}, + "Clone": {MinGo: go121, Text: "slices.Clone()"}, + "Compact": {MinGo: go121, Text: "slices.Compact()"}, + "CompactFunc": {MinGo: go121, Text: "slices.CompactFunc()"}, + "Grow": {MinGo: go121, Text: "slices.Grow()"}, + "Clip": {MinGo: go121, Text: "slices.Clip()"}, + "Reverse": {MinGo: go121, Text: "slices.Reverse()"}, + + "Sort": {MinGo: go121, Text: "slices.Sort()"}, + 
"SortFunc": {MinGo: go121, Text: "slices.SortFunc()"}, + "SortStableFunc": {MinGo: go121, Text: "slices.SortStableFunc()"}, + "IsSorted": {MinGo: go121, Text: "slices.IsSorted()"}, + "IsSortedFunc": {MinGo: go121, Text: "slices.IsSortedFunc()"}, + "Min": {MinGo: go121, Text: "slices.Min()"}, + "MinFunc": {MinGo: go121, Text: "slices.MinFunc()"}, + "Max": {MinGo: go121, Text: "slices.Max()"}, + "MaxFunc": {MinGo: go121, Text: "slices.MaxFunc()"}, + "BinarySearch": {MinGo: go121, Text: "slices.BinarySearch()"}, + "BinarySearchFunc": {MinGo: go121, Text: "slices.BinarySearchFunc()"}, + }, + constraintsPkgReplacements: map[string]stdReplacement[*ast.SelectorExpr]{ + "Ordered": {MinGo: go121, Text: "cmp.Ordered", Suggested: suggestedFixForConstraintsOrder}, + }, + } + + return &analysis.Analyzer{ + Name: "exptostd", + Doc: "Detects functions from golang.org/x/exp/ that can be replaced by std functions.", + Run: l.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +//nolint:gocognit,gocyclo // The complexity is expected by the cases to handle. +func (a *analyzer) run(pass *analysis.Pass) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + a.goVersion = getGoVersion(pass) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + (*ast.FuncDecl)(nil), + (*ast.TypeSpec)(nil), + (*ast.ImportSpec)(nil), + } + + imports := map[string]*ast.ImportSpec{} + + var shouldKeepExpMaps bool + + var resultExpSlices Result + + resultExpConstraints := &Result{} + + insp.Preorder(nodeFilter, func(n ast.Node) { + switch node := n.(type) { + case *ast.ImportSpec: + // skip aliases + if node.Name == nil || node.Name.Name == "" { + imports[trimImportPath(node)] = node + } + + return + + case *ast.CallExpr: + selExpr, ok := node.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + ident, ok := selExpr.X.(*ast.Ident) + if !ok { + return + } + + switch ident.Name { + case pkgMaps: + diagnostic, usage := a.detectPackageUsage(pass, a.mapsPkgReplacements, selExpr, ident, node, pkgExpMaps) + if usage { + pass.Report(diagnostic) + } + + shouldKeepExpMaps = shouldKeepExpMaps || !usage + + case pkgSlices: + diagnostic, usage := a.detectPackageUsage(pass, a.slicesPkgReplacements, selExpr, ident, node, pkgExpSlices) + if usage { + resultExpSlices.Diagnostics = append(resultExpSlices.Diagnostics, diagnostic) + } + + resultExpSlices.shouldKeepImport = resultExpSlices.shouldKeepImport || !usage + } + + case *ast.FuncDecl: + if node.Type.TypeParams != nil { + for _, field := range node.Type.TypeParams.List { + a.detectConstraintsUsage(pass, field.Type, resultExpConstraints) + } + } + + case *ast.TypeSpec: + if node.TypeParams != nil { + for _, field := range node.TypeParams.List { + a.detectConstraintsUsage(pass, field.Type, resultExpConstraints) + } + } + + interfaceType, ok := node.Type.(*ast.InterfaceType) + if !ok { + return + } + + for _, method := range interfaceType.Methods.List { + switch exp := method.Type.(type) { + case *ast.BinaryExpr: + a.detectConstraintsUsage(pass, exp.X, resultExpConstraints) + a.detectConstraintsUsage(pass, exp.Y, resultExpConstraints) + + case *ast.SelectorExpr: + a.detectConstraintsUsage(pass, exp, resultExpConstraints) + } + } + } + }) + + // maps + a.suggestReplaceImport(pass, imports, shouldKeepExpMaps, pkgExpMaps, pkgMaps) + + // slices + if resultExpSlices.shouldKeepImport { + for _, diagnostic := range resultExpSlices.Diagnostics { + pass.Report(diagnostic) + } + } else { + a.suggestReplaceImport(pass, imports, 
resultExpSlices.shouldKeepImport, pkgExpSlices, pkgSlices) + } + + // constraints + a.suggestReplaceImport(pass, imports, resultExpConstraints.shouldKeepImport, pkgExpConstraints, pkgComp) + + return nil, nil +} + +func (a *analyzer) detectPackageUsage(pass *analysis.Pass, + replacements map[string]stdReplacement[*ast.CallExpr], + selExpr *ast.SelectorExpr, ident *ast.Ident, callExpr *ast.CallExpr, + importPath string, +) (analysis.Diagnostic, bool) { + rp, ok := replacements[selExpr.Sel.Name] + if !ok { + return analysis.Diagnostic{}, false + } + + if !a.skipGoVersionDetection && rp.MinGo > a.goVersion { + return analysis.Diagnostic{}, false + } + + if !isPackageUsed(pass, ident, importPath) { + return analysis.Diagnostic{}, false + } + + diagnostic := analysis.Diagnostic{ + Pos: callExpr.Pos(), + Message: fmt.Sprintf("%s.%s() can be replaced by %s", importPath, selExpr.Sel.Name, rp.Text), + } + + if rp.Suggested != nil { + fix, err := rp.Suggested(callExpr) + if err != nil { + diagnostic.Message = fmt.Sprintf("Suggested fix error: %v", err) + } else { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, fix) + } + } + + return diagnostic, true +} + +func (a *analyzer) detectConstraintsUsage(pass *analysis.Pass, expr ast.Expr, result *Result) { + selExpr, ok := expr.(*ast.SelectorExpr) + if !ok { + return + } + + ident, ok := selExpr.X.(*ast.Ident) + if !ok { + return + } + + if !isPackageUsed(pass, ident, pkgExpConstraints) { + return + } + + rp, ok := a.constraintsPkgReplacements[selExpr.Sel.Name] + if !ok { + result.shouldKeepImport = true + return + } + + if !a.skipGoVersionDetection && rp.MinGo > a.goVersion { + result.shouldKeepImport = true + return + } + + diagnostic := analysis.Diagnostic{ + Pos: selExpr.Pos(), + Message: fmt.Sprintf("%s.%s can be replaced by %s", pkgExpConstraints, selExpr.Sel.Name, rp.Text), + } + + if rp.Suggested != nil { + fix, err := rp.Suggested(selExpr) + if err != nil { + diagnostic.Message = fmt.Sprintf("Suggested fix error: %v", err) + } else { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, fix) + } + } + + pass.Report(diagnostic) +} + +func (a *analyzer) suggestReplaceImport(pass *analysis.Pass, imports map[string]*ast.ImportSpec, shouldKeep bool, importPath, stdPackage string) { + imp, ok := imports[importPath] + if !ok || shouldKeep { + return + } + + src := trimImportPath(imp) + + pass.Report(analysis.Diagnostic{ + Pos: imp.Pos(), + End: imp.End(), + Message: fmt.Sprintf("Import statement '%s' can be replaced by '%s'", src, stdPackage), + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: imp.Path.Pos(), + End: imp.Path.End(), + NewText: []byte(string(imp.Path.Value[0]) + stdPackage + string(imp.Path.Value[0])), + }}, + }}, + }) +} + +func suggestedFixForClear(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) { + s := &ast.CallExpr{ + Fun: ast.NewIdent("clear"), + Args: callExpr.Args, + Ellipsis: callExpr.Ellipsis, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), s) + if err != nil { + return analysis.SuggestedFix{}, fmt.Errorf("print suggested fix: %w", err) + } + + return analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: callExpr.Pos(), + End: callExpr.End(), + NewText: buf.Bytes(), + }}, + }, nil +} + +func suggestedFixForKeysOrValues(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) { + s := &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: &ast.Ident{Name: "slices"}, + Sel: &ast.Ident{Name: "AppendSeq"}, + }, + Args: 
[]ast.Expr{ + &ast.CallExpr{ + Fun: &ast.Ident{Name: "make"}, + Args: []ast.Expr{ + &ast.ArrayType{ + Elt: &ast.Ident{Name: "T"}, // TODO(ldez) improve the type detection. + }, + &ast.BasicLit{Kind: token.INT, Value: "0"}, + &ast.CallExpr{ + Fun: &ast.Ident{Name: "len"}, + Args: callExpr.Args, + }, + }, + }, + callExpr, + }, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), s) + if err != nil { + return analysis.SuggestedFix{}, fmt.Errorf("print suggested fix: %w", err) + } + + return analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: callExpr.Pos(), + End: callExpr.End(), + NewText: buf.Bytes(), + }}, + }, nil +} + +func suggestedFixForConstraintsOrder(selExpr *ast.SelectorExpr) (analysis.SuggestedFix, error) { + s := &ast.SelectorExpr{ + X: &ast.Ident{Name: pkgComp}, + Sel: &ast.Ident{Name: "Ordered"}, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), s) + if err != nil { + return analysis.SuggestedFix{}, fmt.Errorf("print suggested fix: %w", err) + } + + return analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: selExpr.Pos(), + End: selExpr.End(), + NewText: buf.Bytes(), + }}, + }, nil +} + +func isPackageUsed(pass *analysis.Pass, ident *ast.Ident, importPath string) bool { + obj := pass.TypesInfo.Uses[ident] + if obj == nil { + return false + } + + pkg, ok := obj.(*types.PkgName) + if !ok { + return false + } + + if pkg.Imported().Path() != importPath { + return false + } + + return true +} + +func getGoVersion(pass *analysis.Pass) int { + // Prior to go1.22, versions.FileVersion returns only the toolchain version, + // which is of no use to us, + // so disable this analyzer on earlier versions. + if !slices.Contains(build.Default.ReleaseTags, "go1.22") { + return 0 // false + } + + pkgVersion := pass.Pkg.GoVersion() + if pkgVersion == "" { + // Empty means Go devel. + return goDevel // true + } + + raw := strings.TrimPrefix(pkgVersion, "go") + + // prerelease version (go1.24rc1) + idx := strings.IndexFunc(raw, func(r rune) bool { + return (r < '0' || r > '9') && r != '.' + }) + + if idx != -1 { + raw = raw[:idx] + } + + vParts := strings.Split(raw, ".") + + v, err := strconv.Atoi(strings.Join(vParts[:2], "")) + if err != nil { + v = 116 + } + + return v +} + +func trimImportPath(spec *ast.ImportSpec) string { + return spec.Path.Value[1 : len(spec.Path.Value)-1] +} diff --git a/vendor/github.com/ldez/exptostd/readme.md b/vendor/github.com/ldez/exptostd/readme.md new file mode 100644 index 000000000..bd1df8d54 --- /dev/null +++ b/vendor/github.com/ldez/exptostd/readme.md @@ -0,0 +1,116 @@ +# ExpToStd + +Detects functions from golang.org/x/exp/ that can be replaced by std functions. 
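
For illustration, a minimal sketch of the rewrite suggested for `maps.Keys` on Go 1.23+, based on the `mapsPkgReplacements` table in `exptostd.go` above; the function names and the `map[string]int` type here are only examples, not part of the analyzer itself:

```go
package example

import (
	"maps"
	"slices"

	expmaps "golang.org/x/exp/maps"
)

// Before: golang.org/x/exp/maps.Keys returns the keys as a slice directly.
func keysExp(data map[string]int) []string {
	return expmaps.Keys(data)
}

// After (Go 1.23+): the standard maps.Keys returns an iterator, so the
// suggested fix materializes it with slices.AppendSeq, preallocating capacity.
func keysStd(data map[string]int) []string {
	return slices.AppendSeq(make([]string, 0, len(data)), maps.Keys(data))
}
```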
+ +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) + +Actual detections: + +- `golang.org/x/exp/maps`: + - `Keys` + - `Values` + - `Equal` + - `EqualFunc` + - `Clone` + - `Copy` + - `DeleteFunc` + - `Clear` + +- `golang.org/x/exp/slices`: + - `Equal` + - `EqualFunc` + - `Compare` + - `CompareFunc` + - `Index` + - `IndexFunc` + - `Contains` + - `ContainsFunc` + - `Insert` + - `Delete` + - `DeleteFunc` + - `Replace` + - `Clone` + - `Compact` + - `CompactFunc` + - `Grow` + - `Clip` + - `Reverse` + - `Sort` + - `SortFunc` + - `SortStableFunc` + - `IsSorted` + - `IsSortedFunc` + - `Min` + - `MinFunc` + - `Max` + - `MaxFunc` + - `BinarySearch` + - `BinarySearchFunc` + +- `golang.org/x/exp/constraints`: + - `Ordered` + +## Usages + +### Inside golangci-lint + +Recommended. + +```yaml +linters: + enable: + - exptostd +``` + +### As a CLI + +```bash +go install github.com/ldez/exptostd/cmd/exptostd@latest +``` + +```bash +./exptostd ./... +``` + +## Examples + +```go +package foo + +import ( + "fmt" + + "golang.org/x/exp/maps" +) + +func foo(m map[string]string) { + clone := maps.Clone(m) + + fmt.Println(clone) +} +``` + +It can be replaced by: + +```go +package foo + +import ( + "fmt" + "maps" +) + +func foo(m map[string]string) { + clone := maps.Clone(m) + + fmt.Println(clone) +} + +``` + +## References + +- https://tip.golang.org/doc/go1.21#maps +- https://tip.golang.org/doc/go1.21#slices +- https://tip.golang.org/doc/go1.23#iterators +- https://tip.golang.org/doc/go1.21#cmp diff --git a/vendor/github.com/ldez/gomoddirectives/.golangci.yml b/vendor/github.com/ldez/gomoddirectives/.golangci.yml index 034745570..7f2566656 100644 --- a/vendor/github.com/ldez/gomoddirectives/.golangci.yml +++ b/vendor/github.com/ldez/gomoddirectives/.golangci.yml @@ -1,9 +1,31 @@ -run: - timeout: 2m +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - prealloc + - bodyclose + - wsl + - nlreturn + - mnd + - testpackage + - paralleltest + - tparallel + - err113 + - wrapcheck + - exhaustive + - exhaustruct + - varnamelen linters-settings: govet: enable-all: true + disable: + - fieldalignment gocyclo: min-complexity: 12 goconst: @@ -49,39 +71,6 @@ linters-settings: rules: json: pascal -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - prealloc - - bodyclose - - wsl - - nlreturn - - gomnd - - testpackage - - paralleltest - - tparallel - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - varnamelen - issues: exclude-use-default: false max-issues-per-linter: 0 @@ -92,11 +81,20 @@ issues: exclude-rules: - path: "(.+)_test.go" linters: - - funlen - - goconst + - funlen + - goconst + - maintidx - path: cmd/gomoddirectives/gomoddirectives.go + linters: + - forbidigo text: 'use of `fmt.Println` forbidden' output: show-stats: true sort-results: true + sort-order: + - linter + - file + +run: + timeout: 2m diff --git a/vendor/github.com/ldez/gomoddirectives/LICENSE 
b/vendor/github.com/ldez/gomoddirectives/LICENSE index caed523b4..c1bf0c328 100644 --- a/vendor/github.com/ldez/gomoddirectives/LICENSE +++ b/vendor/github.com/ldez/gomoddirectives/LICENSE @@ -175,7 +175,7 @@ END OF TERMS AND CONDITIONS - Copyright 2021 Fernandez Ludovic + Copyright 2024 Fernandez Ludovic Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go index 2a4c90474..857d22f9b 100644 --- a/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go +++ b/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go @@ -1,17 +1,26 @@ -// Package gomoddirectives a linter that handle `replace`, `retract`, `exclude` directives into `go.mod`. +// Package gomoddirectives a linter that handle directives into `go.mod`. package gomoddirectives import ( + "context" "fmt" "go/token" + "regexp" "strings" + "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" + "golang.org/x/tools/go/analysis" ) const ( reasonRetract = "a comment is mandatory to explain why the version has been retracted" reasonExclude = "exclude directive is not allowed" + reasonToolchain = "toolchain directive is not allowed" + reasonToolchainPattern = "toolchain directive (%s) doesn't match the pattern '%s'" + reasonTool = "tool directive is not allowed" + reasonGoDebug = "godebug directive is not allowed" + reasonGoVersion = "go directive (%s) doesn't match the pattern '%s'" reasonReplaceLocal = "local replacement are not allowed" reasonReplace = "replacement are not allowed" reasonReplaceIdentical = "the original module and the replacement are identical" @@ -44,6 +53,36 @@ type Options struct { ReplaceAllowLocal bool ExcludeForbidden bool RetractAllowNoExplanation bool + ToolchainForbidden bool + ToolchainPattern *regexp.Regexp + ToolForbidden bool + GoDebugForbidden bool + GoVersionPattern *regexp.Regexp +} + +// AnalyzePass analyzes a pass. +func AnalyzePass(pass *analysis.Pass, opts Options) ([]Result, error) { + info, err := gomod.GetModuleInfo(context.Background()) + if err != nil { + return nil, fmt.Errorf("get information about modules: %w", err) + } + + goMod := info[0].GoMod + if pass.Module != nil && pass.Module.Path != "" { + for _, m := range info { + if m.Path == pass.Module.Path { + goMod = m.GoMod + break + } + } + } + + f, err := parseGoMod(goMod) + if err != nil { + return nil, fmt.Errorf("parse %s: %w", goMod, err) + } + + return AnalyzeFile(f, opts), nil } // Analyze analyzes a project. @@ -58,58 +97,135 @@ func Analyze(opts Options) ([]Result, error) { // AnalyzeFile analyzes a mod file. func AnalyzeFile(file *modfile.File, opts Options) []Result { + checks := []func(file *modfile.File, opts Options) []Result{ + checkRetractDirectives, + checkExcludeDirectives, + checkToolDirectives, + checkReplaceDirectives, + checkToolchainDirective, + checkGoDebugDirectives, + checkGoVersionDirectives, + } + var results []Result + for _, check := range checks { + results = append(results, check(file, opts)...) 
+ } - if !opts.RetractAllowNoExplanation { - for _, r := range file.Retract { - if r.Rationale != "" { - continue - } + return results +} - results = append(results, NewResult(file, r.Syntax, reasonRetract)) - } +func checkGoVersionDirectives(file *modfile.File, opts Options) []Result { + if file == nil || file.Go == nil || opts.GoVersionPattern == nil || opts.GoVersionPattern.MatchString(file.Go.Version) { + return nil } - if opts.ExcludeForbidden { - for _, e := range file.Exclude { - results = append(results, NewResult(file, e.Syntax, reasonExclude)) + return []Result{NewResult(file, file.Go.Syntax, fmt.Sprintf(reasonGoVersion, file.Go.Version, opts.GoVersionPattern.String()))} +} + +func checkToolchainDirective(file *modfile.File, opts Options) []Result { + if file.Toolchain == nil { + return nil + } + + if opts.ToolchainForbidden { + return []Result{NewResult(file, file.Toolchain.Syntax, reasonToolchain)} + } + + if opts.ToolchainPattern == nil { + return nil + } + + if !opts.ToolchainPattern.MatchString(file.Toolchain.Name) { + return []Result{NewResult(file, file.Toolchain.Syntax, fmt.Sprintf(reasonToolchainPattern, file.Toolchain.Name, opts.ToolchainPattern.String()))} + } + + return nil +} + +func checkRetractDirectives(file *modfile.File, opts Options) []Result { + if opts.RetractAllowNoExplanation { + return nil + } + + var results []Result + + for _, retract := range file.Retract { + if retract.Rationale != "" { + continue } + + results = append(results, NewResult(file, retract.Syntax, reasonRetract)) + } + + return results +} + +func checkExcludeDirectives(file *modfile.File, opts Options) []Result { + if !opts.ExcludeForbidden { + return nil + } + + var results []Result + + for _, exclude := range file.Exclude { + results = append(results, NewResult(file, exclude.Syntax, reasonExclude)) } + return results +} + +func checkToolDirectives(file *modfile.File, opts Options) []Result { + if !opts.ToolForbidden { + return nil + } + + var results []Result + + for _, tool := range file.Tool { + results = append(results, NewResult(file, tool.Syntax, reasonTool)) + } + + return results +} + +func checkReplaceDirectives(file *modfile.File, opts Options) []Result { + var results []Result + uniqReplace := map[string]struct{}{} - for _, r := range file.Replace { - reason := check(opts, r) + for _, replace := range file.Replace { + reason := checkReplaceDirective(opts, replace) if reason != "" { - results = append(results, NewResult(file, r.Syntax, reason)) + results = append(results, NewResult(file, replace.Syntax, reason)) continue } - if r.Old.Path == r.New.Path && r.Old.Version == r.New.Version { - results = append(results, NewResult(file, r.Syntax, reasonReplaceIdentical)) + if replace.Old.Path == replace.New.Path && replace.Old.Version == replace.New.Version { + results = append(results, NewResult(file, replace.Syntax, reasonReplaceIdentical)) continue } - if _, ok := uniqReplace[r.Old.Path+r.Old.Version]; ok { - results = append(results, NewResult(file, r.Syntax, reasonReplaceDuplicate)) + if _, ok := uniqReplace[replace.Old.Path+replace.Old.Version]; ok { + results = append(results, NewResult(file, replace.Syntax, reasonReplaceDuplicate)) } - uniqReplace[r.Old.Path+r.Old.Version] = struct{}{} + uniqReplace[replace.Old.Path+replace.Old.Version] = struct{}{} } return results } -func check(o Options, r *modfile.Replace) string { +func checkReplaceDirective(opts Options, r *modfile.Replace) string { if isLocal(r) { - if o.ReplaceAllowLocal { + if opts.ReplaceAllowLocal { return "" } 
return fmt.Sprintf("%s: %s", reasonReplaceLocal, r.Old.Path) } - for _, v := range o.ReplaceAllowList { + for _, v := range opts.ReplaceAllowList { if r.Old.Path == v { return "" } @@ -118,6 +234,20 @@ func check(o Options, r *modfile.Replace) string { return fmt.Sprintf("%s: %s", reasonReplace, r.Old.Path) } +func checkGoDebugDirectives(file *modfile.File, opts Options) []Result { + if !opts.GoDebugForbidden { + return nil + } + + var results []Result + + for _, goDebug := range file.Godebug { + results = append(results, NewResult(file, goDebug.Syntax, reasonGoDebug)) + } + + return results +} + // Filesystem paths found in "replace" directives are represented by a path with an empty version. // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 func isLocal(r *modfile.Replace) bool { diff --git a/vendor/github.com/ldez/gomoddirectives/module.go b/vendor/github.com/ldez/gomoddirectives/module.go index 4cb365379..c3e47c8a4 100644 --- a/vendor/github.com/ldez/gomoddirectives/module.go +++ b/vendor/github.com/ldez/gomoddirectives/module.go @@ -1,45 +1,32 @@ package gomoddirectives import ( - "bytes" - "encoding/json" - "errors" + "context" "fmt" "os" - "os/exec" + "path/filepath" + "github.com/ldez/grignotin/goenv" "golang.org/x/mod/modfile" ) -type modInfo struct { - Path string `json:"Path"` - Dir string `json:"Dir"` - GoMod string `json:"GoMod"` - GoVersion string `json:"GoVersion"` - Main bool `json:"Main"` -} - // GetModuleFile gets module file. func GetModuleFile() (*modfile.File, error) { - // https://github.com/golang/go/issues/44753#issuecomment-790089020 - cmd := exec.Command("go", "list", "-m", "-json") - - raw, err := cmd.Output() + goMod, err := goenv.GetOne(context.Background(), goenv.GOMOD) if err != nil { - return nil, fmt.Errorf("command go list: %w: %s", err, string(raw)) + return nil, err } - var v modInfo - err = json.NewDecoder(bytes.NewBuffer(raw)).Decode(&v) + mod, err := parseGoMod(goMod) if err != nil { - return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(raw)) + return nil, fmt.Errorf("failed to parse go.mod (%s): %w", goMod, err) } - if v.GoMod == "" { - return nil, errors.New("working directory is not part of a module") - } + return mod, nil +} - raw, err = os.ReadFile(v.GoMod) +func parseGoMod(goMod string) (*modfile.File, error) { + raw, err := os.ReadFile(filepath.Clean(goMod)) if err != nil { return nil, fmt.Errorf("reading go.mod file: %w", err) } diff --git a/vendor/github.com/ldez/gomoddirectives/readme.md b/vendor/github.com/ldez/gomoddirectives/readme.md index 510c8502e..7d6d2765b 100644 --- a/vendor/github.com/ldez/gomoddirectives/readme.md +++ b/vendor/github.com/ldez/gomoddirectives/readme.md @@ -1,16 +1,196 @@ # gomoddirectives +A linter that handle directives into `go.mod`. + [![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) [![Build Status](https://github.com/ldez/gomoddirectives/workflows/Main/badge.svg?branch=master)](https://github.com/ldez/gomoddirectives/actions) -A linter that handle [`replace`](https://golang.org/ref/mod#go-mod-file-replace), [`retract`](https://golang.org/ref/mod#go-mod-file-retract), [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives into `go.mod`. +## Usage + +### Inside golangci-lint + +Recommended. + +```yml +linters: + enable: + - gomoddirectives + +linters-settings: + gomoddirectives: + # Allow local `replace` directives. 
+ # Default: false + replace-local: true + + # List of allowed `replace` directives. + # Default: [] + replace-allow-list: + - launchpad.net/gocheck + # Allow to not explain why the version has been retracted in the `retract` directives. + # Default: false + retract-allow-no-explanation: true + + # Forbid the use of the `exclude` directives. + # Default: false + exclude-forbidden: true + + # Forbid the use of the `toolchain` directive. + # Default: false + toolchain-forbidden: true + + # Defines a pattern to validate `toolchain` directive. + # Default: '' (no match) + toolchain-pattern: 'go1\.22\.\d+$' + + # Forbid the use of the `tool` directives. + # Default: false + tool-forbidden: true + + # Forbid the use of the `godebug` directive. + # Default: false + go-debug-forbidden: true + + # Defines a pattern to validate `go` minimum version directive. + # Default: '' (no match) + go-version-pattern: '1\.\d+(\.0)?$' +``` + +### As a CLI + +``` +gomoddirectives [flags] + +Flags: + -exclude + Forbid the use of exclude directives + -godebug + Forbid the use of godebug directives + -goversion string + Pattern to validate go min version directive + -h Show this help. + -list value + List of allowed replace directives + -local + Allow local replace directives + -retract-no-explanation + Allow to use retract directives without explanation + -tool + Forbid the use of tool directives + -toolchain + Forbid the use of toolchain directive + -toolchain-pattern string + Pattern to validate toolchain directive +``` + +## Details + +### [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives + +- Force explanation for `retract` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +retract ( + v1.0.0 // Explanation +) +``` + +### [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives + +- Ban all `replace` directives. +- Allow only local `replace` directives. +- Allow only some `replace` directives. +- Detect duplicated `replace` directives. +- Detect identical `replace` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +replace github.com/ldez/grignotin => ../grignotin/ +``` + +### [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives + +- Ban all `exclude` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +exclude ( + golang.org/x/crypto v1.4.5 + golang.org/x/text v1.6.7 +) +``` + +### [`tool`](https://golang.org/ref/mod#go-mod-file-tool) directives + +- Ban all `tool` directives. + +```go +module example.com/foo + +go 1.24 + +tool ( + example.com/module/cmd/a + example.com/module/cmd/b +) +``` + +### [`toolchain`](https://golang.org/ref/mod#go-mod-file-toolchain) directive + +- Ban `toolchain` directive. +- Use a regular expression to constraint the Go minimum version. + +```go +module example.com/foo + +go 1.22 + +toolchain go1.23.3 +``` + +### [`godebug`](https://go.dev/ref/mod#go-mod-file-godebug) directives + +- Ban `godebug` directive. + +```go +module example.com/foo + +go 1.22 + +godebug default=go1.21 +godebug ( + panicnil=1 + asynctimerchan=0 +) +``` + +### [`go`](https://go.dev/ref/mod#go-mod-file-go) directive + +- Use a regular expression to constraint the Go minimum version. 
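
The same constraint can also be applied programmatically; this is a sketch based on the `GetModuleFile`, `AnalyzeFile`, and `Options` API shown in the `gomoddirectives.go` and `module.go` hunks above (the regexp value and error handling are only examples), with a matching `go.mod` sample just below:

```go
package main

import (
	"fmt"
	"log"
	"regexp"

	"github.com/ldez/gomoddirectives"
)

func main() {
	// Parse the go.mod of the current module.
	file, err := gomoddirectives.GetModuleFile()
	if err != nil {
		log.Fatal(err)
	}

	// Constrain the `go` directive with a pattern and forbid `toolchain`.
	results := gomoddirectives.AnalyzeFile(file, gomoddirectives.Options{
		GoVersionPattern:   regexp.MustCompile(`1\.\d+(\.0)?$`),
		ToolchainForbidden: true,
	})

	// Each Result describes one offending directive.
	for _, res := range results {
		fmt.Println(res)
	}
}
```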
-Features: +```go +module example.com/foo -- ban all [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only local [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only some [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- force explanation for [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives -- ban all [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives -- detect duplicated [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- detect identical [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +go 1.22.0 +``` diff --git a/vendor/github.com/ldez/grignotin/goenv/goenv.go b/vendor/github.com/ldez/grignotin/goenv/goenv.go new file mode 100644 index 000000000..1f0c31e4e --- /dev/null +++ b/vendor/github.com/ldez/grignotin/goenv/goenv.go @@ -0,0 +1,50 @@ +// Package goenv A set of functions to get information from `go env`. +package goenv + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +// GetAll gets information from "go env". +func GetAll(ctx context.Context) (map[string]string, error) { + v, err := Get(ctx) + if err != nil { + return nil, err + } + + return v, nil +} + +// GetOne gets information from "go env" for one environment variable. +func GetOne(ctx context.Context, name string) (string, error) { + v, err := Get(ctx, name) + if err != nil { + return "", err + } + + return v[name], nil +} + +// Get gets information from "go env" for one or several environment variables. +func Get(ctx context.Context, name ...string) (map[string]string, error) { + args := append([]string{"env", "-json"}, name...) + cmd := exec.CommandContext(ctx, "go", args...) //nolint:gosec // The env var names must be checked by the user. + + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("command %q: %w: %s", strings.Join(cmd.Args, " "), err, string(out)) + } + + v := map[string]string{} + err = json.NewDecoder(bytes.NewBuffer(out)).Decode(&v) + if err != nil { + return nil, err + } + + return v, nil +} diff --git a/vendor/github.com/ldez/grignotin/goenv/names.go b/vendor/github.com/ldez/grignotin/goenv/names.go new file mode 100644 index 000000000..a5d6eeeeb --- /dev/null +++ b/vendor/github.com/ldez/grignotin/goenv/names.go @@ -0,0 +1,276 @@ +package goenv + +// General-purpose environment variables. +// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L490 +const ( + // GCCGO The gccgo command to run for 'go build -compiler=gccgo'. + GCCGO = "GCCGO" + // GO111MODULE Controls whether the go command runs in module-aware mode or GOPATH mode. + // May be "off", "on", or "auto". + // See https://golang.org/ref/mod#mod-commands. + GO111MODULE = "GO111MODULE" + // GOARCH The architecture, or processor, for which to compile code. + // Examples are amd64, 386, arm, ppc64. + GOARCH = "GOARCH" + // GOAUTH Controls authentication for go-import and HTTPS module mirror interactions. + // See 'go help goauth'. + GOAUTH = "GOAUTH" + // GOBIN The directory where 'go install' will install a command. + GOBIN = "GOBIN" + // GOCACHE The directory where the go command will store cached + // information for reuse in future builds. + GOCACHE = "GOCACHE" + // GOCACHEPROG A command (with optional space-separated flags) that implements an + // external go command build cache. + // See 'go doc cmd/go/internal/cacheprog'. 
+ GOCACHEPROG = "GOCACHEPROG" + // GODEBUG Enable various debugging facilities. See https://go.dev/doc/godebug + // for details. + GODEBUG = "GODEBUG" + // GOENV The location of the Go environment configuration file. + // Cannot be set using 'go env -w'. + // Setting GOENV=off in the environment disables the use of the + // default configuration file. + GOENV = "GOENV" + // GOFLAGS A space-separated list of -flag=value settings to apply + // to go commands by default, when the given flag is known by + // the current command. Each entry must be a standalone flag. + // Because the entries are space-separated, flag values must + // not contain spaces. Flags listed on the command line + // are applied after this list and therefore override it. + GOFLAGS = "GOFLAGS" + // GOINSECURE Comma-separated list of glob patterns (in the syntax of Go's path.Match) + // of module path prefixes that should always be fetched in an insecure + // manner. Only applies to dependencies that are being fetched directly. + // GOINSECURE does not disable checksum database validation. GOPRIVATE or + // GONOSUMDB may be used to achieve that. + GOINSECURE = "GOINSECURE" + // GOMODCACHE The directory where the go command will store downloaded modules. + GOMODCACHE = "GOMODCACHE" + // GOOS The operating system for which to compile code. + // Examples are linux, darwin, windows, netbsd. + GOOS = "GOOS" + // GOPATH Controls where various files are stored. See: 'go help gopath'. + GOPATH = "GOPATH" + // GOPROXY URL of Go module proxy. See https://golang.org/ref/mod#environment-variables + // and https://golang.org/ref/mod#module-proxy for details. + GOPROXY = "GOPROXY" + // GOROOT The root of the go tree. + GOROOT = "GOROOT" + // GOSUMDB The name of checksum database to use and optionally its public key and + // URL. See https://golang.org/ref/mod#authenticating. + GOSUMDB = "GOSUMDB" + // GOTMPDIR The directory where the go command will write + // temporary source files, packages, and binaries. + GOTMPDIR = "GOTMPDIR" + // GOTOOLCHAIN Controls which Go toolchain is used. See https://go.dev/doc/toolchain. + GOTOOLCHAIN = "GOTOOLCHAIN" + // GOVCS Lists version control commands that may be used with matching servers. + // See 'go help vcs'. + GOVCS = "GOVCS" + // GOWORK In module aware mode, use the given go.work file as a workspace file. + // By default or when GOWORK is "auto", the go command searches for a + // file named go.work in the current directory and then containing directories + // until one is found. If a valid go.work file is found, the modules + // specified will collectively be used as the main modules. If GOWORK + // is "off", or a go.work file is not found in "auto" mode, workspace + // mode is disabled. + GOWORK = "GOWORK" + + // GOPRIVATE Comma-separated list of glob patterns (in the syntax of Go's path.Match) + // of module path prefixes that should always be fetched directly + // or that should not be compared against the checksum database. + // See https://golang.org/ref/mod#private-modules. + GOPRIVATE = "GOPRIVATE" + // GONOPROXY Comma-separated list of glob patterns (in the syntax of Go's path.Match) + // of module path prefixes that should always be fetched directly + // or that should not be compared against the checksum database. + // See https://golang.org/ref/mod#private-modules. 
+ GONOPROXY = "GONOPROXY" + // GONOSUMDB Comma-separated list of glob patterns (in the syntax of Go's path.Match) + // of module path prefixes that should always be fetched directly + // or that should not be compared against the checksum database. + // See https://golang.org/ref/mod#private-modules. + GONOSUMDB = "GONOSUMDB" +) + +// Environment variables for use with cgo. +// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L571 +const ( + // AR The command to use to manipulate library archives when + // building with the gccgo compiler. + // The default is 'ar'. + AR = "AR" + // CC The command to use to compile C code. + CC = "CC" + // CGO_CFLAGS Flags that cgo will pass to the compiler when compiling + // C code. + CGO_CFLAGS = "CGO_CFLAGS" + // CGO_CFLAGS_ALLOW A regular expression specifying additional flags to allow + // to appear in #cgo CFLAGS source code directives. + // Does not apply to the CGO_CFLAGS environment variable. + CGO_CFLAGS_ALLOW = "CGO_CFLAGS_ALLOW" + // CGO_CFLAGS_DISALLOW A regular expression specifying flags that must be disallowed + // from appearing in #cgo CFLAGS source code directives. + // Does not apply to the CGO_CFLAGS environment variable. + CGO_CFLAGS_DISALLOW = "CGO_CFLAGS_DISALLOW" + // CGO_ENABLED Whether the cgo command is supported. Either 0 or 1. + CGO_ENABLED = "CGO_ENABLED" + // CXX The command to use to compile C++ code. + CXX = "CXX" + // FC The command to use to compile Fortran code. + FC = "FC" + // PKG_CONFIG Path to pkg-config tool. + PKG_CONFIG = "PKG_CONFIG" + + // CGO_CPPFLAGS Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C preprocessor. + CGO_CPPFLAGS = "CGO_CPPFLAGS" + // CGO_CPPFLAGS_ALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C preprocessor. + CGO_CPPFLAGS_ALLOW = "CGO_CPPFLAGS_ALLOW" + // CGO_CPPFLAGS_DISALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C preprocessor. + CGO_CPPFLAGS_DISALLOW = "CGO_CPPFLAGS_DISALLOW" + + // CGO_CXXFLAGS Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C++ compiler. + CGO_CXXFLAGS = "CGO_CXXFLAGS" + // CGO_CXXFLAGS_ALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C++ compiler. + CGO_CXXFLAGS_ALLOW = "CGO_CXXFLAGS_ALLOW" + // CGO_CXXFLAGS_DISALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the C++ compiler. + CGO_CXXFLAGS_DISALLOW = "CGO_CXXFLAGS_DISALLOW" + + // CGO_FFLAGS Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the Fortran compiler. + CGO_FFLAGS = "CGO_FFLAGS" + // CGO_FFLAGS_ALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the Fortran compiler. + CGO_FFLAGS_ALLOW = "CGO_FFLAGS_ALLOW" + // CGO_FFLAGS_DISALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the Fortran compiler. + CGO_FFLAGS_DISALLOW = "CGO_FFLAGS_DISALLOW" + + // CGO_LDFLAGS Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the linker. + CGO_LDFLAGS = "CGO_LDFLAGS" + // CGO_LDFLAGS_ALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the linker. + CGO_LDFLAGS_ALLOW = "CGO_LDFLAGS_ALLOW" + // CGO_LDFLAGS_DISALLOW Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + // but for the linker. + CGO_LDFLAGS_DISALLOW = "CGO_LDFLAGS_DISALLOW" +) + +// Architecture-specific environment variables. 
+// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L611 +const ( + // GO386 For GOARCH=386, how to implement floating point instructions. + // Valid values are sse2 (default), softfloat. + GO386 = "GO386" + // GOAMD64 For GOARCH=amd64, the microarchitecture level for which to compile. + // Valid values are v1 (default), v2, v3, v4. + // See https://golang.org/wiki/MinimumRequirements#amd64 + GOAMD64 = "GOAMD64" + // GOARM For GOARCH=arm, the ARM architecture for which to compile. + // Valid values are 5, 6, 7. + // When the Go tools are built on an arm system, + // the default value is set based on what the build system supports. + // When the Go tools are not built on an arm system + // (that is, when building a cross-compiler), + // the default value is 7. + // The value can be followed by an option specifying how to implement floating point instructions. + // Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7). + GOARM = "GOARM" + // GOARM64 For GOARCH=arm64, the ARM64 architecture for which to compile. + // Valid values are v8.0 (default), v8.{1-9}, v9.{0-5}. + // The value can be followed by an option specifying extensions implemented by target hardware. + // Valid options are ,lse and ,crypto. + // Note that some extensions are enabled by default starting from a certain GOARM64 version; + // for example, lse is enabled by default starting from v8.1. + GOARM64 = "GOARM64" + // GOMIPS For GOARCH=mips{,le}, whether to use floating point instructions. + // Valid values are hardfloat (default), softfloat. + GOMIPS = "GOMIPS" + // GOMIPS64 For GOARCH=mips64{,le}, whether to use floating point instructions. + // Valid values are hardfloat (default), softfloat. + GOMIPS64 = "GOMIPS64" + // GOPPC64 For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture). + // Valid values are power8 (default), power9, power10. + GOPPC64 = "GOPPC64" + // GORISCV64 For GOARCH=riscv64, the RISC-V user-mode application profile for which + // to compile. Valid values are rva20u64 (default), rva22u64. + // See https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc + GORISCV64 = "GORISCV64" + // GOWASM For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use. + // Valid values are satconv, signext. + GOWASM = "GOWASM" +) + +// Environment variables for use with code coverage. +// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L654 +const ( + // GOCOVERDIR Directory into which to write code coverage data files + // generated by running a "go build -cover" binary. + // Requires that GOEXPERIMENT=coverageredesign is enabled. + GOCOVERDIR = "GOCOVERDIR" +) + +// Special-purpose environment variables. +// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L661 +const ( + // GCCGOTOOLDIR If set, where to find gccgo tools, such as cgo. + // The default is based on how gccgo was configured. + GCCGOTOOLDIR = "GCCGOTOOLDIR" + // GOEXPERIMENT Comma-separated list of toolchain experiments to enable or disable. + // The list of available experiments may change arbitrarily over time. + // See GOROOT/src/internal/goexperiment/flags.go for currently valid values. + // Warning: This variable is provided for the development and testing + // of the Go toolchain itself. Use beyond that purpose is unsupported. 
+ GOEXPERIMENT = "GOEXPERIMENT" + // GOFIPS140 The FIPS-140 cryptography mode to use when building binaries. + // The default is GOFIPS140=off, which makes no FIPS-140 changes at all. + // Other values enable FIPS-140 compliance measures and select alternate + // versions of the cryptography source code. + // See https://go.dev/security/fips140 for details. + GOFIPS140 = "GOFIPS140" + // GO_EXTLINK_ENABLED Whether the linker should use external linking mode + // when using -linkmode=auto with code that uses cgo. + // Set to 0 to disable external linking mode, 1 to enable it. + GO_EXTLINK_ENABLED = "GO_EXTLINK_ENABLED" + // GIT_ALLOW_PROTOCOL Defined by Git. A colon-separated list of schemes that are allowed + // to be used with git fetch/clone. If set, any scheme not explicitly + // mentioned will be considered insecure by 'go get'. + // Because the variable is defined by Git, the default value cannot + // be set using 'go env -w'. + GIT_ALLOW_PROTOCOL = "GIT_ALLOW_PROTOCOL" +) + +// Additional information available from 'go env' but not read from the environment. +// Reference: https://github.com/golang/go/blob/0afd7e85e5d7154161770f06a17d09bf1ffa3e94/src/cmd/go/internal/help/helpdoc.go#L689 +const ( + // GOEXE The executable file name suffix (".exe" on Windows, "" on other systems). + GOEXE = "GOEXE" + // GOGCCFLAGS A space-separated list of arguments supplied to the CC command. + GOGCCFLAGS = "GOGCCFLAGS" + // GOHOSTARCH The architecture (GOARCH) of the Go toolchain binaries. + GOHOSTARCH = "GOHOSTARCH" + // GOHOSTOS The operating system (GOOS) of the Go toolchain binaries. + GOHOSTOS = "GOHOSTOS" + // GOMOD The absolute path to the go.mod of the main module. + // If module-aware mode is enabled, but there is no go.mod, GOMOD will be + // os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows). + // If module-aware mode is disabled, GOMOD will be the empty string. + GOMOD = "GOMOD" + // GOTELEMETRY The current Go telemetry mode ("off", "local", or "on"). + // See "go help telemetry" for more information. + GOTELEMETRY = "GOTELEMETRY" + // GOTELEMETRYDIR The directory Go telemetry data is written is written to. + GOTELEMETRYDIR = "GOTELEMETRYDIR" + // GOTOOLDIR The directory where the go tools (compile, cover, doc, etc...) are installed. + GOTOOLDIR = "GOTOOLDIR" + // GOVERSION The version of the installed Go tree, as reported by runtime.Version. + GOVERSION = "GOVERSION" +) diff --git a/vendor/github.com/ldez/grignotin/gomod/gomod.go b/vendor/github.com/ldez/grignotin/gomod/gomod.go new file mode 100644 index 000000000..76e17870d --- /dev/null +++ b/vendor/github.com/ldez/grignotin/gomod/gomod.go @@ -0,0 +1,85 @@ +// Package gomod A set of functions to get information about module (go list). +package gomod + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/ldez/grignotin/goenv" + "golang.org/x/mod/modfile" +) + +// ModInfo Module information. +// +//nolint:tagliatelle // temporary: the next version of golangci-lint will allow configuration by package. +type ModInfo struct { + Path string `json:"Path"` + Dir string `json:"Dir"` + GoMod string `json:"GoMod"` + GoVersion string `json:"GoVersion"` + Main bool `json:"Main"` +} + +// GetModuleInfo gets modules information from `go list`. 
+func GetModuleInfo(ctx context.Context) ([]ModInfo, error) { + // https://github.com/golang/go/issues/44753#issuecomment-790089020 + cmd := exec.CommandContext(ctx, "go", "list", "-m", "-json") + + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("command %q: %w: %s", strings.Join(cmd.Args, " "), err, string(out)) + } + + var infos []ModInfo + + for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); { + var v ModInfo + if err := dec.Decode(&v); err != nil { + return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out)) + } + + if v.GoMod == "" { + return nil, errors.New("working directory is not part of a module") + } + + if !v.Main || v.Dir == "" { + continue + } + + infos = append(infos, v) + } + + if len(infos) == 0 { + return nil, errors.New("go.mod file not found") + } + + return infos, nil +} + +// GetModulePath extracts module path from go.mod. +func GetModulePath(ctx context.Context) (string, error) { + p, err := goenv.GetOne(ctx, goenv.GOMOD) + if err != nil { + return "", err + } + + b, err := os.ReadFile(filepath.Clean(p)) + if err != nil { + return "", fmt.Errorf("reading go.mod: %w", err) + } + + return modfile.ModulePath(b), nil +} + +// GetGoModPath extracts go.mod path from "go env". +// Deprecated: use `goenv.GetOne(context.Background(), goenv.GOMOD)` instead. +func GetGoModPath() (string, error) { + return goenv.GetOne(context.Background(), goenv.GOMOD) +} diff --git a/vendor/github.com/ldez/tagliatelle/.golangci.yml b/vendor/github.com/ldez/tagliatelle/.golangci.yml index ec5c5c766..01c76dca9 100644 --- a/vendor/github.com/ldez/tagliatelle/.golangci.yml +++ b/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -1,7 +1,28 @@ -run: - timeout: 5m - skip-files: [ ] - skip-dirs: [ ] +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - wsl + - nlreturn + - mnd + - err113 + - wrapcheck + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - forcetypeassert + - varnamelen + - nilnil + - errchkjson + - nonamedreturns linters-settings: govet: @@ -9,7 +30,7 @@ linters-settings: disable: - fieldalignment gocyclo: - min-complexity: 15 + min-complexity: 20 goconst: min-len: 5 min-occurrences: 3 @@ -24,11 +45,13 @@ linters-settings: gofumpt: extra-rules: true depguard: - list-type: denylist - include-go-root: false - packages: - - github.com/sirupsen/logrus - - github.com/pkg/errors + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package gocritic: enabled-tags: - diagnostic @@ -43,46 +66,12 @@ linters-settings: hugeParam: sizeThreshold: 100 -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - wsl - - nlreturn - - gomnd - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - testpackage - - tparallel - - paralleltest - - prealloc - - ifshort - - forcetypeassert - - varnamelen - - nilnil - - errchkjson - - nonamedreturns - 
issues: exclude-use-default: false - max-per-linter: 0 + max-issues-per-linter: 0 max-same-issues: 0 exclude: - 'package-comments: should have a package comment' + +run: + timeout: 5m diff --git a/vendor/github.com/ldez/tagliatelle/converter.go b/vendor/github.com/ldez/tagliatelle/converter.go new file mode 100644 index 000000000..6005f5b75 --- /dev/null +++ b/vendor/github.com/ldez/tagliatelle/converter.go @@ -0,0 +1,116 @@ +package tagliatelle + +import ( + "fmt" + "strings" + + "github.com/ettle/strcase" +) + +// https://github.com/dominikh/go-tools/blob/v0.5.1/config/config.go#L167-L175 +// +//nolint:gochecknoglobals // For now I'll accept this, but I think will refactor to use a structure. +var staticcheckInitialisms = map[string]bool{ + "AMQP": true, + "DB": true, + "GID": true, + "LHS": false, + "RHS": false, + "RTP": true, + "SIP": true, + "TS": true, +} + +// Converter is the signature of a case converter. +type Converter func(s string) string + +// ConverterCallback allows to abstract `getSimpleConverter` and `ruleToConverter`. +type ConverterCallback func() (Converter, error) + +func getSimpleConverter(c string) (Converter, error) { + switch c { + case "camel": + return strcase.ToCamel, nil + case "pascal": + return strcase.ToPascal, nil + case "kebab": + return strcase.ToKebab, nil + case "snake": + return strcase.ToSnake, nil + case "goCamel": + return strcase.ToGoCamel, nil + case "goPascal": + return strcase.ToGoPascal, nil + case "goKebab": + return strcase.ToGoKebab, nil + case "goSnake": + return strcase.ToGoSnake, nil + case "upperSnake": + return strcase.ToSNAKE, nil + case "header": + return toHeader, nil + case "upper": + return strings.ToUpper, nil + case "lower": + return strings.ToLower, nil + default: + return nil, fmt.Errorf("unsupported case: %s", c) + } +} + +func toHeader(s string) string { + return strcase.ToCase(s, strcase.TitleCase, '-') +} + +func ruleToConverter(rule ExtendedRule) (Converter, error) { + if rule.ExtraInitialisms { + for k, v := range staticcheckInitialisms { + if _, found := rule.InitialismOverrides[k]; found { + continue + } + + rule.InitialismOverrides[k] = v + } + } + + caser := strcase.NewCaser(strings.HasPrefix(rule.Case, "go"), rule.InitialismOverrides, nil) + + switch strings.ToLower(strings.TrimPrefix(rule.Case, "go")) { + case "camel": + return caser.ToCamel, nil + + case "pascal": + return caser.ToPascal, nil + + case "kebab": + return caser.ToKebab, nil + + case "snake": + return caser.ToSnake, nil + + case "uppersnake": + return caser.ToSNAKE, nil + + case "header": + return toHeaderCase(caser), nil + + case "upper": + return func(s string) string { + return caser.ToCase(s, strcase.UpperCase, 0) + }, nil + + case "lower": + return func(s string) string { + return caser.ToCase(s, strcase.LowerCase, 0) + }, nil + + default: + return nil, fmt.Errorf("unsupported case: %s", rule.Case) + } +} + +func toHeaderCase(caser *strcase.Caser) Converter { + return func(s string) string { + return caser.ToCase(s, strcase.TitleCase, '-') + } +} diff --git a/vendor/github.com/ldez/tagliatelle/readme.md b/vendor/github.com/ldez/tagliatelle/readme.md index 55a544db8..52d10304b 100644 --- a/vendor/github.com/ldez/tagliatelle/readme.md +++ b/vendor/github.com/ldez/tagliatelle/readme.md @@ -97,15 +97,14 @@ type Foo struct { } ``` -## What this tool is about +## What this linter is about -This tool is about validating tags according to rules you define. -The tool also allows to fix tags according to the rules you defined. 
+This linter is about validating tags according to rules you define. +The linter also allows to fix tags according to the rules you defined. -This tool is not intended to validate the fact a tag in valid or not. -To do that, you can use `go vet`, or use [golangci-lint](https://golangci-lint.run) ["go vet"](https://golangci-lint.run/usage/linters/#govet) linter. +This linter is not intended to validate the fact a tag in valid or not. -## How to use the tool +## How to use the linter ### As a golangci-lint linter @@ -114,17 +113,149 @@ Define the rules, you want via your [golangci-lint](https://golangci-lint.run) c ```yaml linters-settings: tagliatelle: - # Check the struck tag name case. + # Checks the struct tag name case. case: - # Use the struct field name to check the name of the struct tag. + # Defines the association between tag name and case. + # Any struct tag name can be used. + # Supported string cases: + # - `camel` + # - `pascal` + # - `kebab` + # - `snake` + # - `upperSnake` + # - `goCamel` + # - `goPascal` + # - `goKebab` + # - `goSnake` + # - `upper` + # - `lower` + # - `header` + rules: + json: camel + yaml: camel + xml: camel + toml: camel + bson: camel + avro: snake + mapstructure: kebab + env: upperSnake + envconfig: upperSnake + whatever: snake + # Defines the association between tag name and case. + # Important: the `extended-rules` overrides `rules`. + # Default: empty + extended-rules: + json: + # Supported string cases: + # - `camel` + # - `pascal` + # - `kebab` + # - `snake` + # - `upperSnake` + # - `goCamel` + # - `goPascal` + # - `goKebab` + # - `goSnake` + # - `header` + # - `lower` + # - `header` + # + # Required + case: camel + # Adds 'AMQP', 'DB', 'GID', 'RTP', 'SIP', 'TS' to initialisms, + # and removes 'LHS', 'RHS' from initialisms. + # Default: false + extra-initialisms: true + # Defines initialism additions and overrides. + # Default: empty + initialism-overrides: + DB: true # add a new initialism + LHS: false # disable a default initialism. + # ... + # Uses the struct field name to check the name of the struct tag. # Default: false use-field-name: true + # The field names to ignore. + # Default: [] + ignored-fields: + - Bar + - Foo + # Overrides the default/root configuration. + # Default: [] + overrides: + - + # The package path (uses `/` only as a separator). + # Required + pkg: foo/bar + # Default: empty or the same as the default/root configuration. + rules: + json: snake + xml: pascal + # Default: empty or the same as the default/root configuration. + extended-rules: + # same options as the base `extended-rules`. + # Default: false (WARNING: it doesn't follow the default/root configuration) + use-field-name: true + # The field names to ignore. + # Default: [] or the same as the default/root configuration. + ignored-fields: + - Bar + - Foo + # Ignore the package (takes precedence over all other configurations). + # Default: false + ignore: true + +``` + +#### Examples + +Overrides case rules for the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: rules: - # Any struct tag type can be used. - # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`. 
json: camel yaml: camel xml: camel + overrides: + - pkg: foo/bar + rules: + json: snake + xml: pascal +``` + +Ignore fields inside the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: + rules: + json: camel + yaml: camel + xml: camel + overrides: + - pkg: foo/bar + ignored-fields: + - Bar + - Foo +``` + +Ignore the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: + rules: + json: camel + yaml: camel + xml: camel + overrides: + - pkg: foo/bar + ignore: true ``` More information here https://golangci-lint.run/usage/linters/#tagliatelle @@ -149,13 +280,14 @@ Here are the default rules for the well known and used tags, when using tagliate - `bson`: `camel` - `avro`: `snake` - `header`: `header` +- `env`: `upperSnake` - `envconfig`: `upperSnake` ### Custom Rules -The tool is not limited to the tags used in example, you can use it to validate any tag. +The linter is not limited to the tags used in example, **you can use it to validate any tag**. -You can add your own tag, for example `whatever` and tells the tool you want to use `kebab`. +You can add your own tag, for example `whatever` and tells the linter you want to use `kebab`. This option is only available via [golangci-lint](https://golangci-lint.run). @@ -164,14 +296,15 @@ linters-settings: tagliatelle: # Check the struck tag name case. case: - # Use the struct field name to check the name of the struct tag. - # Default: false - use-field-name: true rules: # Any struct tag type can be used. # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` - json: camel - yaml: camel - xml: camel + json: camel + yaml: camel + xml: camel + toml: camel whatever: kebab + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true ``` diff --git a/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/vendor/github.com/ldez/tagliatelle/tagliatelle.go index 22c5feb3d..99c7da2d0 100644 --- a/vendor/github.com/ldez/tagliatelle/tagliatelle.go +++ b/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -6,10 +6,14 @@ import ( "errors" "fmt" "go/ast" + "maps" + "path" + "path/filepath" "reflect" + "slices" "strings" - "github.com/ettle/strcase" + iradix "github.com/hashicorp/go-immutable-radix/v2" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -17,8 +21,30 @@ import ( // Config the tagliatelle configuration. type Config struct { - Rules map[string]string - UseFieldName bool + Base + Overrides []Overrides +} + +// Overrides applies configuration overrides by package. +type Overrides struct { + Base + Package string +} + +// Base shared configuration between rules. +type Base struct { + Rules map[string]string + ExtendedRules map[string]ExtendedRule + UseFieldName bool + IgnoredFields []string + Ignore bool +} + +// ExtendedRule allows to customize rules. +type ExtendedRule struct { + Case string + ExtraInitialisms bool + InitialismOverrides map[string]bool } // New creates an analyzer. 
@@ -26,20 +52,18 @@ func New(config Config) *analysis.Analyzer { return &analysis.Analyzer{ Name: "tagliatelle", Doc: "Checks the struct tags.", - Run: func(pass *analysis.Pass) (interface{}, error) { - if len(config.Rules) == 0 { + Run: func(pass *analysis.Pass) (any, error) { + if len(config.Rules) == 0 && len(config.ExtendedRules) == 0 && len(config.Overrides) == 0 { return nil, nil } return run(pass, config) }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - }, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } } -func run(pass *analysis.Pass, config Config) (interface{}, error) { +func run(pass *analysis.Pass, config Config) (any, error) { isp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !ok { return nil, errors.New("missing inspect analyser") @@ -49,6 +73,16 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { (*ast.StructType)(nil), } + cfg := config.Base + if pass.Module != nil { + radixTree := createRadixTree(config, pass.Module.Path) + _, cfg, _ = radixTree.Root().LongestPrefix([]byte(pass.Pkg.Path())) + } + + if cfg.Ignore { + return nil, nil + } + isp.Preorder(nodeFilter, func(n ast.Node) { node, ok := n.(*ast.StructType) if !ok { @@ -56,14 +90,14 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { } for _, field := range node.Fields.List { - analyze(pass, config, node, field) + analyze(pass, cfg, node, field) } }) return nil, nil } -func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.Field) { +func analyze(pass *analysis.Pass, config Base, n *ast.StructType, field *ast.Field) { if n.Fields == nil || n.Fields.NumFields() < 1 { // skip empty structs return @@ -80,49 +114,74 @@ func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.F return } + cleanRules(config) + + if slices.Contains(config.IgnoredFields, fieldName) { + return + } + + for key, extRule := range config.ExtendedRules { + report(pass, config, key, extRule.Case, fieldName, n, field, func() (Converter, error) { + return ruleToConverter(extRule) + }) + } + for key, convName := range config.Rules { - if convName == "" { - continue - } + report(pass, config, key, convName, fieldName, n, field, func() (Converter, error) { + return getSimpleConverter(convName) + }) + } +} - value, flags, ok := lookupTagValue(field.Tag, key) - if !ok { - // skip when no struct tag for the key - continue - } +func report(pass *analysis.Pass, config Base, key, convName, fieldName string, n *ast.StructType, field *ast.Field, fn ConverterCallback) { + if convName == "" { + return + } - if value == "-" { - // skip when skipped :) - continue - } + value, flags, ok := lookupTagValue(field.Tag, key) + if !ok { + // skip when no struct tag for the key + return + } - // TODO(ldez): need to be rethink. - // This is an exception because of a bug. - // https://github.com/ldez/tagliatelle/issues/8 - // For now, tagliatelle should try to remain neutral in terms of format. - if hasTagFlag(flags, "inline") { - // skip for inline children (no name to lint) - continue - } + if value == "-" { + // skip when skipped :) + return + } - if value == "" { - value = fieldName - } + // TODO(ldez): need to be rethink. + // tagliatelle should try to remain neutral in terms of format. 
+ if key == "xml" && strings.ContainsAny(value, ">:") { + // ignore XML names than contains path + return + } - converter, err := getConverter(convName) - if err != nil { - pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) - continue - } + // TODO(ldez): need to be rethink. + // This is an exception because of a bug. + // https://github.com/ldez/tagliatelle/issues/8 + // For now, tagliatelle should try to remain neutral in terms of format. + if hasTagFlag(flags, "inline") { + // skip for inline children (no name to lint) + return + } - expected := value - if config.UseFieldName { - expected = fieldName - } + if value == "" { + value = fieldName + } - if value != converter(expected) { - pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) - } + converter, err := fn() + if err != nil { + pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) + return + } + + expected := value + if config.UseFieldName { + expected = fieldName + } + + if value != converter(expected) { + pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) } } @@ -182,37 +241,62 @@ func hasTagFlag(flags []string, query string) bool { return false } -func getConverter(c string) (func(s string) string, error) { - switch c { - case "camel": - return strcase.ToCamel, nil - case "pascal": - return strcase.ToPascal, nil - case "kebab": - return strcase.ToKebab, nil - case "snake": - return strcase.ToSnake, nil - case "goCamel": - return strcase.ToGoCamel, nil - case "goPascal": - return strcase.ToGoPascal, nil - case "goKebab": - return strcase.ToGoKebab, nil - case "goSnake": - return strcase.ToGoSnake, nil - case "header": - return toHeader, nil - case "upper": - return strings.ToUpper, nil - case "upperSnake": - return strcase.ToSNAKE, nil - case "lower": - return strings.ToLower, nil - default: - return nil, fmt.Errorf("unsupported case: %s", c) +func createRadixTree(config Config, modPath string) *iradix.Tree[Base] { + r := iradix.New[Base]() + + defaultRule := Base{ + Rules: maps.Clone(config.Rules), + ExtendedRules: maps.Clone(config.ExtendedRules), + UseFieldName: config.UseFieldName, + Ignore: config.Ignore, } + + defaultRule.IgnoredFields = append(defaultRule.IgnoredFields, config.IgnoredFields...) + + r, _, _ = r.Insert([]byte(""), defaultRule) + + for _, override := range config.Overrides { + c := Base{ + UseFieldName: override.UseFieldName, + Ignore: override.Ignore, + } + + // If there is an override the base configuration is ignored. + if len(override.IgnoredFields) == 0 { + c.IgnoredFields = append(c.IgnoredFields, config.IgnoredFields...) + } else { + c.IgnoredFields = append(c.IgnoredFields, override.IgnoredFields...) + } + + // Copy the rules from the base. + c.Rules = maps.Clone(config.Rules) + + // Overrides the rule from the base. + for k, v := range override.Rules { + c.Rules[k] = v + } + + // Copy the extended rules from the base. + c.ExtendedRules = maps.Clone(config.ExtendedRules) + + // Overrides the extended rule from the base. 
+ for k, v := range override.ExtendedRules { + c.ExtendedRules[k] = v + } + + key := path.Join(modPath, override.Package) + if filepath.Base(modPath) == override.Package { + key = modPath + } + + r, _, _ = r.Insert([]byte(key), c) + } + + return r } -func toHeader(s string) string { - return strcase.ToCase(s, strcase.TitleCase, '-') +func cleanRules(config Base) { + for k := range config.ExtendedRules { + delete(config.Rules, k) + } } diff --git a/vendor/github.com/ldez/usetesting/.gitignore b/vendor/github.com/ldez/usetesting/.gitignore new file mode 100644 index 000000000..0907a9069 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/.gitignore @@ -0,0 +1,2 @@ +/usetesting +.idea diff --git a/vendor/github.com/ldez/usetesting/.golangci.yml b/vendor/github.com/ldez/usetesting/.golangci.yml new file mode 100644 index 000000000..597647d24 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/.golangci.yml @@ -0,0 +1,83 @@ +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - nlreturn + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - varnamelen + - nilnil + - errchkjson + - nonamedreturns + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + mnd: + ignored-numbers: + - "124" + gocyclo: + min-complexity: 20 + goconst: + min-len: 5 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + wsl: + force-case-trailing-whitespace: 1 + allow-trailing-comment: true + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + +output: + show-stats: true + sort-results: true + sort-order: + - linter + - file + +run: + timeout: 5m diff --git a/vendor/github.com/ldez/usetesting/LICENSE b/vendor/github.com/ldez/usetesting/LICENSE new file mode 100644 index 000000000..c1bf0c328 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2024 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ldez/usetesting/Makefile b/vendor/github.com/ldez/usetesting/Makefile new file mode 100644 index 000000000..b8eca6598 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... + +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/usetesting/ diff --git a/vendor/github.com/ldez/usetesting/readme.md b/vendor/github.com/ldez/usetesting/readme.md new file mode 100644 index 000000000..e21ba06e6 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/readme.md @@ -0,0 +1,209 @@ +# UseTesting + +Detects when some calls can be replaced by methods from the testing package. + +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) + +## Usages + +### Inside golangci-lint + +Recommended. 
+ +```yml +linters-settings: + usetesting: + # Enable/disable `os.CreateTemp("", ...)` detections. + # Default: true + os-create-temp: false + + # Enable/disable `os.MkdirTemp()` detections. + # Default: true + os-mkdir-temp: false + + # Enable/disable `os.Setenv()` detections. + # Default: false + os-setenv: true + + # Enable/disable `os.TempDir()` detections. + # Default: false + os-temp-dir: true + + # Enable/disable `os.Chdir()` detections. + # Disabled if Go < 1.24. + # Default: true + os-chdir: false + + # Enable/disable `context.Background()` detections. + # Disabled if Go < 1.24. + # Default: true + context-background: false + + # Enable/disable `context.TODO()` detections. + # Disabled if Go < 1.24. + # Default: true + context-todo: false +``` + +### As a CLI + +```shell +go install github.com/ldez/usetesting/cmd/usetesting@latest +``` + +``` +usetesting: Reports uses of functions with replacement inside the testing package. + +Usage: usetesting [-flag] [package] + +Flags: + -contextbackground + Enable/disable context.Background() detections (default true) + -contexttodo + Enable/disable context.TODO() detections (default true) + -oschdir + Enable/disable os.Chdir() detections (default true) + -osmkdirtemp + Enable/disable os.MkdirTemp() detections (default true) + -ossetenv + Enable/disable os.Setenv() detections (default false) + -ostempdir + Enable/disable os.TempDir() detections (default false) + -oscreatetemp + Enable/disable os.CreateTemp("", ...) detections (default true) +... +``` + +## Examples + +### `os.MkdirTemp` + +```go +func TestExample(t *testing.T) { + os.MkdirTemp("a", "b") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.TempDir() + // ... +} +``` + +### `os.TempDir` + +```go +func TestExample(t *testing.T) { + os.TempDir() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.TempDir() + // ... +} +``` + +### `os.CreateTemp` + +```go +func TestExample(t *testing.T) { + os.CreateTemp("", "x") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + os.CreateTemp(t.TempDir(), "x") + // ... +} +``` + +### `os.Setenv` + +```go +func TestExample(t *testing.T) { + os.Setenv("A", "b") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.Setenv("A", "b") + // ... +} +``` + +### `os.Chdir` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + os.Chdir("x") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.Chdir("x") + // ... +} +``` + +### `context.Background` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + ctx := context.Background() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + ctx := t.Context() + // ... +} +``` + +### `context.TODO` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + ctx := context.TODO() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + ctx := t.Context() + // ... 
+} +``` + +## References + +- https://tip.golang.org/doc/go1.15#testingpkgtesting (`TempDir`) +- https://tip.golang.org/doc/go1.17#testingpkgtesting (`SetEnv`) +- https://tip.golang.org/doc/go1.24#testingpkgtesting (`Chdir`, `Context`) diff --git a/vendor/github.com/ldez/usetesting/report.go b/vendor/github.com/ldez/usetesting/report.go new file mode 100644 index 000000000..3c90b6bae --- /dev/null +++ b/vendor/github.com/ldez/usetesting/report.go @@ -0,0 +1,200 @@ +package usetesting + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "slices" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// because [os.CreateTemp] takes 2 args. +const nbArgCreateTemp = 2 + +func (a *analyzer) reportCallExpr(pass *analysis.Pass, ce *ast.CallExpr, fnInfo *FuncInfo) bool { + if !a.osCreateTemp { + return false + } + + if len(ce.Args) != nbArgCreateTemp { + return false + } + + switch fun := ce.Fun.(type) { + case *ast.SelectorExpr: + if fun.Sel == nil || fun.Sel.Name != createTempName { + return false + } + + expr, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + + if expr.Name == osPkgName && isFirstArgEmptyString(ce) { + pass.Report(diagnosticOSCreateTemp(ce, fnInfo)) + + return true + } + + case *ast.Ident: + if fun.Name != createTempName { + return false + } + + pkgName := getPkgNameFromType(pass, fun) + + if pkgName == osPkgName && isFirstArgEmptyString(ce) { + pass.Report(diagnosticOSCreateTemp(ce, fnInfo)) + + return true + } + } + + return false +} + +func diagnosticOSCreateTemp(ce *ast.CallExpr, fnInfo *FuncInfo) analysis.Diagnostic { + diagnostic := analysis.Diagnostic{ + Pos: ce.Pos(), + Message: fmt.Sprintf( + `%s.%s("", ...) could be replaced by %[1]s.%[2]s(%s.%s(), ...) in %s`, + osPkgName, createTempName, fnInfo.ArgName, tempDirName, fnInfo.Name, + ), + } + + // Skip `` arg names. + if !strings.Contains(fnInfo.ArgName, "<") { + g := &ast.CallExpr{ + Fun: ce.Fun, + Args: []ast.Expr{ + &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: &ast.Ident{Name: fnInfo.ArgName}, + Sel: &ast.Ident{Name: tempDirName}, + }, + }, + ce.Args[1], + }, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), g) + if err != nil { + diagnostic.Message = fmt.Sprintf("Suggested fix error: %v", err) + return diagnostic + } + + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: ce.Pos(), + End: ce.End(), + NewText: buf.Bytes(), + }}, + }) + } + + return diagnostic +} + +func (a *analyzer) reportSelector(pass *analysis.Pass, se *ast.SelectorExpr, fnInfo *FuncInfo) bool { + if se.Sel == nil || !se.Sel.IsExported() { + return false + } + + ident, ok := se.X.(*ast.Ident) + if !ok { + return false + } + + return a.report(pass, se, ident.Name, se.Sel.Name, fnInfo) +} + +func (a *analyzer) reportIdent(pass *analysis.Pass, ident *ast.Ident, fnInfo *FuncInfo) bool { + if !ident.IsExported() { + return false + } + + if !slices.Contains(a.fieldNames, ident.Name) { + return false + } + + pkgName := getPkgNameFromType(pass, ident) + + return a.report(pass, ident, pkgName, ident.Name, fnInfo) +} + +//nolint:gocyclo // The complexity is expected by the number of cases to check. 
+func (a *analyzer) report(pass *analysis.Pass, rg analysis.Range, origPkgName, origName string, fnInfo *FuncInfo) bool { + switch { + case a.osMkdirTemp && origPkgName == osPkgName && origName == mkdirTempName: + report(pass, rg, origPkgName, origName, tempDirName, fnInfo) + + case a.osTempDir && origPkgName == osPkgName && origName == tempDirName: + report(pass, rg, origPkgName, origName, tempDirName, fnInfo) + + case a.osSetenv && origPkgName == osPkgName && origName == setenvName: + report(pass, rg, origPkgName, origName, setenvName, fnInfo) + + case a.geGo124 && a.osChdir && origPkgName == osPkgName && origName == chdirName: + report(pass, rg, origPkgName, origName, chdirName, fnInfo) + + case a.geGo124 && a.contextBackground && origPkgName == contextPkgName && origName == backgroundName: + report(pass, rg, origPkgName, origName, contextName, fnInfo) + + case a.geGo124 && a.contextTodo && origPkgName == contextPkgName && origName == todoName: + report(pass, rg, origPkgName, origName, contextName, fnInfo) + + default: + return false + } + + return true +} + +func report(pass *analysis.Pass, rg analysis.Range, origPkgName, origName, expectName string, fnInfo *FuncInfo) { + diagnostic := analysis.Diagnostic{ + Pos: rg.Pos(), + Message: fmt.Sprintf("%s.%s() could be replaced by %s.%s() in %s", + origPkgName, origName, fnInfo.ArgName, expectName, fnInfo.Name, + ), + } + + // Skip `` arg names. + // Only applies on `context.XXX` because the nb of return parameters is the same as the replacement. + if !strings.Contains(fnInfo.ArgName, "<") && origPkgName == contextPkgName { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: rg.Pos(), + End: rg.End(), + NewText: []byte(fmt.Sprintf("%s.%s", fnInfo.ArgName, expectName)), + }}, + }) + } + + pass.Report(diagnostic) +} + +func isFirstArgEmptyString(ce *ast.CallExpr) bool { + bl, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return false + } + + return bl.Kind == token.STRING && bl.Value == `""` +} + +func getPkgNameFromType(pass *analysis.Pass, ident *ast.Ident) string { + o := pass.TypesInfo.ObjectOf(ident) + + if o == nil || o.Pkg() == nil { + return "" + } + + return o.Pkg().Name() +} diff --git a/vendor/github.com/ldez/usetesting/usetesting.go b/vendor/github.com/ldez/usetesting/usetesting.go new file mode 100644 index 000000000..725827832 --- /dev/null +++ b/vendor/github.com/ldez/usetesting/usetesting.go @@ -0,0 +1,268 @@ +// Package usetesting It is an analyzer that detects when some calls can be replaced by methods from the testing package. +package usetesting + +import ( + "go/ast" + "go/build" + "os" + "slices" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + chdirName = "Chdir" + mkdirTempName = "MkdirTemp" + createTempName = "CreateTemp" + setenvName = "Setenv" + tempDirName = "TempDir" + backgroundName = "Background" + todoName = "TODO" + contextName = "Context" +) + +const ( + osPkgName = "os" + contextPkgName = "context" + testingPkgName = "testing" +) + +// FuncInfo information about the test function. +type FuncInfo struct { + Name string + ArgName string +} + +// analyzer is the UseTesting linter. 
+type analyzer struct { + contextBackground bool + contextTodo bool + osChdir bool + osMkdirTemp bool + osTempDir bool + osSetenv bool + osCreateTemp bool + + fieldNames []string + + skipGoVersionDetection bool + geGo124 bool +} + +// NewAnalyzer create a new UseTesting. +func NewAnalyzer() *analysis.Analyzer { + _, skip := os.LookupEnv("USETESTING_SKIP_GO_VERSION_CHECK") // TODO should be removed when go1.25 will be released. + + l := &analyzer{ + fieldNames: []string{ + chdirName, + mkdirTempName, + tempDirName, + setenvName, + backgroundName, + todoName, + createTempName, + }, + skipGoVersionDetection: skip, + } + + a := &analysis.Analyzer{ + Name: "usetesting", + Doc: "Reports uses of functions with replacement inside the testing package.", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: l.run, + } + + a.Flags.BoolVar(&l.contextBackground, "contextbackground", true, "Enable/disable context.Background() detections") + a.Flags.BoolVar(&l.contextTodo, "contexttodo", true, "Enable/disable context.TODO() detections") + a.Flags.BoolVar(&l.osChdir, "oschdir", true, "Enable/disable os.Chdir() detections") + a.Flags.BoolVar(&l.osMkdirTemp, "osmkdirtemp", true, "Enable/disable os.MkdirTemp() detections") + a.Flags.BoolVar(&l.osSetenv, "ossetenv", false, "Enable/disable os.Setenv() detections") + a.Flags.BoolVar(&l.osTempDir, "ostempdir", false, "Enable/disable os.TempDir() detections") + a.Flags.BoolVar(&l.osCreateTemp, "oscreatetemp", true, `Enable/disable os.CreateTemp("", ...) detections`) + + return a +} + +func (a *analyzer) run(pass *analysis.Pass) (any, error) { + if !a.contextBackground && !a.contextTodo && !a.osChdir && !a.osMkdirTemp && !a.osSetenv && !a.osTempDir && !a.osCreateTemp { + return nil, nil + } + + a.geGo124 = a.isGoSupported(pass) + + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + insp.WithStack(nodeFilter, func(node ast.Node, push bool, stack []ast.Node) (proceed bool) { + if !push { + return false + } + + switch fn := node.(type) { + case *ast.FuncDecl: + a.checkFunc(pass, fn.Type, fn.Body, fn.Name.Name) + + case *ast.FuncLit: + if hasParentFunc(stack) { + return true + } + + a.checkFunc(pass, fn.Type, fn.Body, "anonymous function") + } + + return true + }) + + return nil, nil +} + +func (a *analyzer) checkFunc(pass *analysis.Pass, ft *ast.FuncType, block *ast.BlockStmt, fnName string) { + if len(ft.Params.List) < 1 { + return + } + + fnInfo := checkTestFunctionSignature(ft.Params.List[0], fnName) + if fnInfo == nil { + return + } + + ast.Inspect(block, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.SelectorExpr: + return !a.reportSelector(pass, v, fnInfo) + + case *ast.Ident: + return !a.reportIdent(pass, v, fnInfo) + + case *ast.CallExpr: + return !a.reportCallExpr(pass, v, fnInfo) + } + + return true + }) +} + +func (a *analyzer) isGoSupported(pass *analysis.Pass) bool { + if a.skipGoVersionDetection { + return true + } + + // Prior to go1.22, versions.FileVersion returns only the toolchain version, + // which is of no use to us, + // so disable this analyzer on earlier versions. + if !slices.Contains(build.Default.ReleaseTags, "go1.22") { + return false + } + + pkgVersion := pass.Pkg.GoVersion() + if pkgVersion == "" { + // Empty means Go devel. 
+ return true + } + + raw := strings.TrimPrefix(pkgVersion, "go") + + // prerelease version (go1.24rc1) + idx := strings.IndexFunc(raw, func(r rune) bool { + return (r < '0' || r > '9') && r != '.' + }) + + if idx != -1 { + raw = raw[:idx] + } + + vParts := strings.Split(raw, ".") + + v, err := strconv.Atoi(strings.Join(vParts[:2], "")) + if err != nil { + v = 116 + } + + return v >= 124 +} + +func hasParentFunc(stack []ast.Node) bool { + // -2 because the last parent is the node. + const skipSelf = 2 + + // skip 0 because it's always [*ast.File]. + for i := len(stack) - skipSelf; i > 0; i-- { + s := stack[i] + + switch fn := s.(type) { + case *ast.FuncDecl: + if len(fn.Type.Params.List) < 1 { + continue + } + + if checkTestFunctionSignature(fn.Type.Params.List[0], fn.Name.Name) != nil { + return true + } + + case *ast.FuncLit: + if len(fn.Type.Params.List) < 1 { + continue + } + + if checkTestFunctionSignature(fn.Type.Params.List[0], "anonymous function") != nil { + return true + } + } + } + + return false +} + +func checkTestFunctionSignature(arg *ast.Field, fnName string) *FuncInfo { + switch at := arg.Type.(type) { + case *ast.StarExpr: + if se, ok := at.X.(*ast.SelectorExpr); ok { + return createFuncInfo(arg, "", se, testingPkgName, fnName, "T", "B") + } + + case *ast.SelectorExpr: + return createFuncInfo(arg, "tb", at, testingPkgName, fnName, "TB") + } + + return nil +} + +func createFuncInfo(arg *ast.Field, defaultName string, se *ast.SelectorExpr, pkgName, fnName string, selectorNames ...string) *FuncInfo { + ok := checkSelectorName(se, pkgName, selectorNames...) + if !ok { + return nil + } + + return &FuncInfo{ + Name: fnName, + ArgName: getTestArgName(arg, defaultName), + } +} + +func checkSelectorName(se *ast.SelectorExpr, pkgName string, selectorNames ...string) bool { + if ident, ok := se.X.(*ast.Ident); ok { + return pkgName == ident.Name && slices.Contains(selectorNames, se.Sel.Name) + } + + return false +} + +func getTestArgName(arg *ast.Field, defaultName string) string { + if len(arg.Names) > 0 && arg.Names[0].Name != "_" { + return arg.Names[0].Name + } + + return defaultName +} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff52..000000000 --- a/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24f..000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. 
- - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. 
- -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. - -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. 
(@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. - -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar
2014 - -* Added MustGet... functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md deleted file mode 100644 index 79c87e3e6..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013-2020, Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index e2edda025..000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,128 +0,0 @@ -[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) -[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -# Overview - -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. 
- -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURL([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image;gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d..000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. -// -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the vaule of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus ".' as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. -// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. 
-// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c7979315..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
-// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b..000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// It's use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6e..000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. 
-type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. 
-func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. 
-func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// Load reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct. If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. 
-func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. 
-func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. 
-// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b404..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. 
Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. 
-func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. 
-func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. 
-// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as an uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. -func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. 
- // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. If the value contains a -// circular reference or a malformed expression then -// an error is returned. -// An empty key is silently ignored. -func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { - if key == "" { - return "", false, nil - } - - // if expansion is disabled we allow circular references - if p.DisableExpansion { - prev, ok = p.Get(key) - p.m[key] = value - if !ok { - p.k = append(p.k, key) - } - return prev, ok, nil - } - - // to check for a circular reference we temporarily need - // to set the new value. If there is an error then revert - // to the previous state. Only if all tests are successful - // then we add the key to the p.k list. - prev, ok = p.Get(key) - p.m[key] = value - - // now check for a circular reference - _, err = p.expand(key, value) - if err != nil { - - // revert to the previous state - if ok { - p.m[key] = prev - } else { - delete(p.m, key) - } - - return "", false, err - } - - if !ok { - p.k = append(p.k, key) - } - - return prev, ok, nil -} - -// SetValue sets property key to the default string value -// as defined by fmt.Sprintf("%v"). -func (p *Properties) SetValue(key string, value interface{}) error { - _, _, err := p.Set(key, fmt.Sprintf("%v", value)) - return err -} - -// MustSet sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. An empty key is silently ignored. -func (p *Properties) MustSet(key, value string) (prev string, ok bool) { - prev, ok, err := p.Set(key, value) - if err != nil { - ErrorHandler(err) - } - return prev, ok -} - -// String returns a string of all expanded 'key = value' pairs. -func (p *Properties) String() string { - var s string - for _, key := range p.k { - value, _ := p.Get(key) - s = fmt.Sprintf("%s%s = %s\n", s, key, value) - } - return s -} - -// Sort sorts the properties keys in alphabetical order. -// This is helpfully before writing the properties. -func (p *Properties) Sort() { - sort.Strings(p.k) -} - -// Write writes all unexpanded 'key = value' pairs to the given writer. -// Write returns the number of bytes written and any write error encountered. 
-func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { - return p.WriteComment(w, "", enc) -} - -// WriteComment writes all unexpanced 'key = value' pairs to the given writer. -// If prefix is not empty then comments are written with a blank line and the -// given prefix. The prefix should be either "# " or "! " to be compatible with -// the properties file format. Otherwise, the properties parser will not be -// able to read the file back in. It returns the number of bytes written and -// any write error encountered. -func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { - var x int - - for _, key := range p.k { - value := p.m[key] - - if prefix != "" { - if comments, ok := p.c[key]; ok { - // don't print comments if they are all empty - allEmpty := true - for _, c := range comments { - if c != "" { - allEmpty = false - break - } - } - - if !allEmpty { - // add a blank line between entries but not at the top - if len(comments) > 0 && n > 0 { - x, err = fmt.Fprintln(w) - if err != nil { - return - } - n += x - } - - for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) - if err != nil { - return - } - n += x - } - } - } - } - sep := " = " - if p.WriteSeparator != "" { - sep = p.WriteSeparator - } - x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) - if err != nil { - return - } - n += x - } - return -} - -// Map returns a copy of the properties as a map. -func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. -func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for _, k := range other.k { - if _, ok := p.m[k]; !ok { - p.k = append(p.k, k) - } - } - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found. -func (p *Properties) check() error { - for key, value := range p.m { - if _, err := p.expand(key, value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(key, input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. 
-func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. 
-func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/matoous/godox/.golangci.yml b/vendor/github.com/matoous/godox/.golangci.yml index 3f0fcdb19..8d080b28a 100644 --- a/vendor/github.com/matoous/godox/.golangci.yml +++ b/vendor/github.com/matoous/godox/.golangci.yml @@ -1,10 +1,4 @@ linters-settings: - depguard: - list-type: blacklist - include-go-root: true - packages: - # we are using "github.com/json-iterator/go" instead of json encoder from stdlib - - "encoding/json" dupl: threshold: 100 gocritic: @@ -19,10 +13,9 @@ linters-settings: - unnamedResult # it is experimental currently and doesn't handle typed channels correctly gocyclo: min-complexity: 14 # TODO go lower - golint: - min-confidence: 0 govet: - check-shadowing: true + enable: + - shadow goconst: min-len: 2 min-occurrences: 3 @@ -30,39 +23,47 @@ linters-settings: local-prefixes: gitlab.skypicker.com/search-team/gonuts/conveyance-store lll: line-length: 140 - maligned: - suggest-new: true misspell: locale: US linters: enable-all: true disable: + - depguard # prealloc is not recommended by `golangci-lint` developers. - prealloc - gochecknoglobals + # deprecated + - maligned + - exhaustivestruct + - nosnakecase + - scopelint + - structcheck + - ifshort + - varcheck + - deadcode + - golint + - interfacer + issues: + exclude-dirs: + - "fixtures" exclude-rules: - path: _test\.go linters: + - exhaustruct - goconst - dupl - - path: fixtures - linters: - - gocritic - - varcheck - - deadcode - - unused - run: modules-download-mode: readonly # output configuration options output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: tab + formats: + - format: tab # print lines of code with issue, default is true print-issued-lines: true diff --git a/vendor/github.com/matoous/godox/.revive.toml b/vendor/github.com/matoous/godox/.revive.toml index db0e4edb6..a4a30464d 100644 --- a/vendor/github.com/matoous/godox/.revive.toml +++ b/vendor/github.com/matoous/godox/.revive.toml @@ -1,5 +1,6 @@ ignoreGeneratedHeader = false severity = "warning" +exclude = ["./fixtures/..."] # confidence <= 0.2 generate a lot of errors from package-comments rule. It marks files that do not contain # package-level comments as a warning irrespective of existing package-level coment in one file. diff --git a/vendor/github.com/matoous/godox/Makefile b/vendor/github.com/matoous/godox/Makefile new file mode 100644 index 000000000..694aa21d6 --- /dev/null +++ b/vendor/github.com/matoous/godox/Makefile @@ -0,0 +1,20 @@ +## Help display. +## Pulls comments from beside commands and prints a nicely formatted +## display with the commands and their usage information. + +.DEFAULT_GOAL := help + +help: ## Prints this help + @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: lint +lint: ## Lint the application + golangci-lint run --max-same-issues=0 --timeout=1m ./... + +.PHONY: test +test: ## Run unit tests + go test -race -shuffle=on ./... + +.PHONY: vet +vet: ## Run go vet + go vet ./... 
diff --git a/vendor/github.com/matoous/godox/godox.go b/vendor/github.com/matoous/godox/godox.go index 3903525c8..5bcc7e980 100644 --- a/vendor/github.com/matoous/godox/godox.go +++ b/vendor/github.com/matoous/godox/godox.go @@ -1,3 +1,5 @@ +// Package godox is a linter that scans Go code for comments containing certain keywords +// (like TODO, BUG, FIXME) which typically indicate areas that require attention. package godox import ( @@ -20,22 +22,17 @@ type Message struct { Message string } -func getMessages(comment *ast.Comment, fset *token.FileSet, keywords []string) []Message { +func getMessages(comment *ast.Comment, fset *token.FileSet, keywords []string) ([]Message, error) { commentText := extractComment(comment.Text) - b := bufio.NewReader(bytes.NewBufferString(commentText)) + scanner := bufio.NewScanner(bytes.NewBufferString(commentText)) var comments []Message - for lineNum := 0; ; lineNum++ { - line, _, err := b.ReadLine() - if err != nil { - break - } - + for lineNum := 0; scanner.Scan(); lineNum++ { const minimumSize = 4 - sComment := bytes.TrimSpace(line) + sComment := bytes.TrimSpace(scanner.Bytes()) if len(sComment) < minimumSize { continue } @@ -68,21 +65,22 @@ func getMessages(comment *ast.Comment, fset *token.FileSet, keywords []string) [ } } - return comments + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("scan: %w", err) + } + + return comments, nil } func extractComment(commentText string) string { switch commentText[1] { case '/': - commentText = commentText[2:] - if len(commentText) > 0 && commentText[0] == ' ' { - commentText = commentText[1:] - } + return strings.TrimPrefix(commentText[2:], " ") case '*': - commentText = commentText[2 : len(commentText)-2] + return commentText[2 : len(commentText)-2] + default: + return commentText } - - return commentText } func hasAlphanumRuneAdjacent(rest []byte) bool { @@ -102,7 +100,7 @@ func hasAlphanumRuneAdjacent(rest []byte) bool { // Run runs the godox linter on given file. // Godox searches for comments starting with given keywords and reports them. -func Run(file *ast.File, fset *token.FileSet, keywords ...string) []Message { +func Run(file *ast.File, fset *token.FileSet, keywords ...string) ([]Message, error) { if len(keywords) == 0 { keywords = defaultKeywords } @@ -111,9 +109,14 @@ func Run(file *ast.File, fset *token.FileSet, keywords ...string) []Message { for _, c := range file.Comments { for _, ci := range c.List { - messages = append(messages, getMessages(ci, fset, keywords)...) + msgs, err := getMessages(ci, fset, keywords) + if err != nil { + return nil, err + } + + messages = append(messages, msgs...) } } - return messages + return messages, nil } diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go deleted file mode 100644 index 416d1bbbf..000000000 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build appengine -// +build appengine - -package colorable - -import ( - "io" - "os" - - _ "github.com/mattn/go-isatty" -) - -// NewColorable returns new instance of Writer which handles escape sequence. -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. 
-func NewColorableStdout() io.Writer { - return os.Stdout -} - -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. -func NewColorableStderr() io.Writer { - return os.Stderr -} - -// EnableColorsStdout enable colors if possible. -func EnableColorsStdout(enabled *bool) func() { - if enabled != nil { - *enabled = true - } - return func() {} -} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 766d94603..c1a78aa94 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -1,5 +1,5 @@ -//go:build !windows && !appengine -// +build !windows,!appengine +//go:build !windows || appengine +// +build !windows appengine package colorable diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 1846ad5ab..2df7b8598 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" "sync" - "syscall" + syscall "golang.org/x/sys/windows" "unsafe" "github.com/mattn/go-isatty" @@ -73,7 +73,7 @@ type consoleCursorInfo struct { } var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") + kernel32 = syscall.NewLazySystemDLL("kernel32.dll") procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") @@ -87,8 +87,8 @@ var ( procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) -// Writer provides colorable Writer to the console -type Writer struct { +// writer provides colorable Writer to the console +type writer struct { out io.Writer handle syscall.Handle althandle syscall.Handle @@ -98,7 +98,7 @@ type Writer struct { mutex sync.Mutex } -// NewColorable returns new instance of Writer which handles escape sequence from File. +// NewColorable returns new instance of writer which handles escape sequence from File. func NewColorable(file *os.File) io.Writer { if file == nil { panic("nil passed instead of *os.File to NewColorable()") @@ -112,17 +112,17 @@ func NewColorable(file *os.File) io.Writer { var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + return &writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} } return file } -// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +// NewColorableStdout returns new instance of writer which handles escape sequence for stdout. func NewColorableStdout() io.Writer { return NewColorable(os.Stdout) } -// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +// NewColorableStderr returns new instance of writer which handles escape sequence for stderr. 
func NewColorableStderr() io.Writer { return NewColorable(os.Stderr) } @@ -434,7 +434,7 @@ func atoiWithDefault(s string, def int) (int, error) { } // Write writes data on console -func (w *Writer) Write(data []byte) (n int, err error) { +func (w *writer) Write(data []byte) (n int, err error) { w.mutex.Lock() defer w.mutex.Unlock() var csbi consoleScreenBufferInfo @@ -560,7 +560,7 @@ loop: } procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } @@ -569,7 +569,7 @@ loop: csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': - n, err = strconv.Atoi(buf.String()) + n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } diff --git a/vendor/github.com/mgechev/revive/config/config.go b/vendor/github.com/mgechev/revive/config/config.go index 16559f5ec..34340b71c 100644 --- a/vendor/github.com/mgechev/revive/config/config.go +++ b/vendor/github.com/mgechev/revive/config/config.go @@ -98,18 +98,23 @@ var allRules = append([]lint.Rule{ &rule.CommentsDensityRule{}, &rule.FileLengthLimitRule{}, &rule.FilenameFormatRule{}, + &rule.RedundantBuildTagRule{}, + &rule.UseErrorsNewRule{}, + &rule.RedundantTestMainExitRule{}, }, defaultRules...) +// allFormatters is a list of all available formatters to output the linting results. +// Keep the list sorted and in sync with available formatters in README.md. var allFormatters = []lint.Formatter{ - &formatter.Stylish{}, + &formatter.Checkstyle{}, + &formatter.Default{}, &formatter.Friendly{}, &formatter.JSON{}, &formatter.NDJSON{}, - &formatter.Default{}, - &formatter.Unix{}, - &formatter.Checkstyle{}, &formatter.Plain{}, &formatter.Sarif{}, + &formatter.Stylish{}, + &formatter.Unix{}, } func getFormatters() map[string]lint.Formatter { @@ -145,6 +150,12 @@ func GetLintingRules(config *lint.Config, extraRules []lint.Rule) ([]lint.Rule, continue // skip disabled rules } + if r, ok := r.(lint.ConfigurableRule); ok { + if err := r.Configure(ruleConfig.Arguments); err != nil { + return nil, fmt.Errorf("cannot configure rule: %q: %w", name, err) + } + } + lintingRules = append(lintingRules, r) } @@ -165,14 +176,14 @@ func parseConfig(path string, config *lint.Config) error { if err != nil { return errors.New("cannot read the config file") } - _, err = toml.Decode(string(file), config) + err = toml.Unmarshal(file, config) if err != nil { - return fmt.Errorf("cannot parse the config file: %v", err) + return fmt.Errorf("cannot parse the config file: %w", err) } for k, r := range config.Rules { err := r.Initialize() if err != nil { - return fmt.Errorf("error in config of rule [%s] : [%v]", k, err) + return fmt.Errorf("error in config of rule [%s] : [%w]", k, err) } config.Rules[k] = r } @@ -238,15 +249,14 @@ func GetConfig(configPath string) (*lint.Config, error) { // GetFormatter yields the formatter for lint failures func GetFormatter(formatterName string) (lint.Formatter, error) { formatters := getFormatters() - fmtr := formatters["default"] - if formatterName != "" { - f, ok := formatters[formatterName] - if !ok { - return nil, fmt.Errorf("unknown formatter %v", formatterName) - } - fmtr = f + if formatterName == "" { + return formatters["default"], nil + } + f, ok := formatters[formatterName] + if !ok { + return nil, fmt.Errorf("unknown formatter %v", formatterName) } - return fmtr, nil + return f, 
nil } func defaultConfig() *lint.Config { diff --git a/vendor/github.com/mgechev/revive/formatter/checkstyle.go b/vendor/github.com/mgechev/revive/formatter/checkstyle.go index f45b63c92..8fe85fae5 100644 --- a/vendor/github.com/mgechev/revive/formatter/checkstyle.go +++ b/vendor/github.com/mgechev/revive/formatter/checkstyle.go @@ -45,7 +45,7 @@ func (*Checkstyle) Format(failures <-chan lint.Failure, config lint.Config) (str } fn := failure.GetFilename() if issues[fn] == nil { - issues[fn] = make([]issue, 0) + issues[fn] = []issue{} } issues[fn] = append(issues[fn], iss) } diff --git a/vendor/github.com/mgechev/revive/formatter/default.go b/vendor/github.com/mgechev/revive/formatter/default.go index 2d5a04434..7af4aad06 100644 --- a/vendor/github.com/mgechev/revive/formatter/default.go +++ b/vendor/github.com/mgechev/revive/formatter/default.go @@ -26,3 +26,7 @@ func (*Default) Format(failures <-chan lint.Failure, _ lint.Config) (string, err } return buf.String(), nil } + +func ruleDescriptionURL(ruleName string) string { + return "https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#" + ruleName +} diff --git a/vendor/github.com/mgechev/revive/formatter/friendly.go b/vendor/github.com/mgechev/revive/formatter/friendly.go index 5ff329a23..9c1a0f617 100644 --- a/vendor/github.com/mgechev/revive/formatter/friendly.go +++ b/vendor/github.com/mgechev/revive/formatter/friendly.go @@ -2,9 +2,11 @@ package formatter import ( "bytes" + "cmp" "fmt" "io" - "sort" + "slices" + "strings" "github.com/fatih/color" "github.com/mgechev/revive/lint" @@ -32,7 +34,7 @@ func (*Friendly) Name() string { // Format formats the failures gotten from the lint. func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { - var buf bytes.Buffer + var buf strings.Builder errorMap := map[string]int{} warningMap := map[string]int{} totalErrors := 0 @@ -40,38 +42,38 @@ func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (str for failure := range failures { sev := severity(config, failure) f.printFriendlyFailure(&buf, failure, sev) - if sev == lint.SeverityWarning { + switch sev { + case lint.SeverityWarning: warningMap[failure.RuleName]++ totalWarnings++ - } - if sev == lint.SeverityError { + case lint.SeverityError: errorMap[failure.RuleName]++ totalErrors++ } } + f.printSummary(&buf, totalErrors, totalWarnings) f.printStatistics(&buf, color.RedString("Errors:"), errorMap) f.printStatistics(&buf, color.YellowString("Warnings:"), warningMap) return buf.String(), nil } -func (f *Friendly) printFriendlyFailure(w io.Writer, failure lint.Failure, severity lint.Severity) { - f.printHeaderRow(w, failure, severity) - f.printFilePosition(w, failure) - fmt.Fprintln(w) - fmt.Fprintln(w) +func (f *Friendly) printFriendlyFailure(sb *strings.Builder, failure lint.Failure, severity lint.Severity) { + f.printHeaderRow(sb, failure, severity) + f.printFilePosition(sb, failure) + sb.WriteString("\n\n") } -func (f *Friendly) printHeaderRow(w io.Writer, failure lint.Failure, severity lint.Severity) { +func (f *Friendly) printHeaderRow(sb *strings.Builder, failure lint.Failure, severity lint.Severity) { emoji := getWarningEmoji() if severity == lint.SeverityError { emoji = getErrorEmoji() } - fmt.Fprint(w, f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) + sb.WriteString(f.table([][]string{{emoji, ruleDescriptionURL(failure.RuleName), color.GreenString(failure.Failure)}})) } -func (*Friendly) 
printFilePosition(w io.Writer, failure lint.Failure) { - fmt.Fprintf(w, " %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) +func (*Friendly) printFilePosition(sb *strings.Builder, failure lint.Failure) { + sb.WriteString(fmt.Sprintf(" %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column)) } type statEntry struct { @@ -98,13 +100,11 @@ func (*Friendly) printSummary(w io.Writer, errors, warnings int) { } str := fmt.Sprintf("%d %s (%d %s, %d %s)", errors+warnings, problemsLabel, errors, errorsLabel, warnings, warningsLabel) if errors > 0 { - fmt.Fprintf(w, "%s %s\n", emoji, color.RedString(str)) - fmt.Fprintln(w) + fmt.Fprintf(w, "%s %s\n\n", emoji, color.RedString(str)) return } if warnings > 0 { - fmt.Fprintf(w, "%s %s\n", emoji, color.YellowString(str)) - fmt.Fprintln(w) + fmt.Fprintf(w, "%s %s\n\n", emoji, color.YellowString(str)) return } } @@ -113,12 +113,12 @@ func (f *Friendly) printStatistics(w io.Writer, header string, stats map[string] if len(stats) == 0 { return } - var data []statEntry + data := make([]statEntry, 0, len(stats)) for name, total := range stats { data = append(data, statEntry{name, total}) } - sort.Slice(data, func(i, j int) bool { - return data[i].failures > data[j].failures + slices.SortFunc(data, func(a, b statEntry) int { + return -cmp.Compare(a.failures, b.failures) }) formatted := [][]string{} for _, entry := range data { diff --git a/vendor/github.com/mgechev/revive/formatter/plain.go b/vendor/github.com/mgechev/revive/formatter/plain.go index 09ebf6cdc..351248742 100644 --- a/vendor/github.com/mgechev/revive/formatter/plain.go +++ b/vendor/github.com/mgechev/revive/formatter/plain.go @@ -1,8 +1,8 @@ package formatter import ( - "bytes" "fmt" + "strings" "github.com/mgechev/revive/lint" ) @@ -20,9 +20,9 @@ func (*Plain) Name() string { // Format formats the failures gotten from the lint. func (*Plain) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { - var buf bytes.Buffer + var sb strings.Builder for failure := range failures { - fmt.Fprintf(&buf, "%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) + sb.WriteString(fmt.Sprintf("%v: %s %s\n", failure.Position.Start, failure.Failure, ruleDescriptionURL(failure.RuleName))) } - return buf.String(), nil + return sb.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/sarif.go b/vendor/github.com/mgechev/revive/formatter/sarif.go index c42da73eb..72da16071 100644 --- a/vendor/github.com/mgechev/revive/formatter/sarif.go +++ b/vendor/github.com/mgechev/revive/formatter/sarif.go @@ -20,7 +20,7 @@ func (*Sarif) Name() string { return "sarif" } -const reviveSite = "https://revive.run" +const reviveSite = "https://github.com/mgechev/revive" // Format formats the failures gotten from the lint. 
func (*Sarif) Format(failures <-chan lint.Failure, cfg lint.Config) (string, error) { diff --git a/vendor/github.com/mgechev/revive/formatter/stylish.go b/vendor/github.com/mgechev/revive/formatter/stylish.go index 828228c72..bb3d7cd18 100644 --- a/vendor/github.com/mgechev/revive/formatter/stylish.go +++ b/vendor/github.com/mgechev/revive/formatter/stylish.go @@ -22,11 +22,12 @@ func (*Stylish) Name() string { func formatFailure(failure lint.Failure, severity lint.Severity) []string { fString := color.CyanString(failure.Failure) - fName := color.RedString("https://revive.run/r#" + failure.RuleName) + fURL := ruleDescriptionURL(failure.RuleName) + fName := color.RedString(fURL) lineColumn := failure.Position pos := fmt.Sprintf("(%d, %d)", lineColumn.Start.Line, lineColumn.Start.Column) if severity == lint.SeverityWarning { - fName = color.YellowString("https://revive.run/r#" + failure.RuleName) + fName = color.YellowString(fURL) } return []string{failure.GetFilename(), pos, fName, fString} } @@ -50,7 +51,7 @@ func (*Stylish) Format(failures <-chan lint.Failure, config lint.Config) (string ps = "problem" } - fileReport := make(map[string][][]string) + fileReport := map[string][][]string{} for _, row := range result { if _, ok := fileReport[row[0]]; !ok { @@ -77,11 +78,12 @@ func (*Stylish) Format(failures <-chan lint.Failure, config lint.Config) (string suffix := fmt.Sprintf(" %d %s (%d errors) (%d warnings)", total, ps, totalErrors, total-totalErrors) - if total > 0 && totalErrors > 0 { + switch { + case total > 0 && totalErrors > 0: suffix = color.RedString("\n ✖" + suffix) - } else if total > 0 && totalErrors == 0 { + case total > 0 && totalErrors == 0: suffix = color.YellowString("\n ✖" + suffix) - } else { + default: suffix, output = "", "" } diff --git a/vendor/github.com/mgechev/revive/formatter/unix.go b/vendor/github.com/mgechev/revive/formatter/unix.go index e46f3c275..9ce8fee4d 100644 --- a/vendor/github.com/mgechev/revive/formatter/unix.go +++ b/vendor/github.com/mgechev/revive/formatter/unix.go @@ -1,8 +1,8 @@ package formatter import ( - "bytes" "fmt" + "strings" "github.com/mgechev/revive/lint" ) @@ -22,9 +22,9 @@ func (*Unix) Name() string { // Format formats the failures gotten from the lint. func (*Unix) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { - var buf bytes.Buffer + var sb strings.Builder for failure := range failures { - fmt.Fprintf(&buf, "%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) + sb.WriteString(fmt.Sprintf("%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure)) } - return buf.String(), nil + return sb.String(), nil } diff --git a/vendor/github.com/mgechev/revive/internal/astutils/ast_utils.go b/vendor/github.com/mgechev/revive/internal/astutils/ast_utils.go new file mode 100644 index 000000000..0a346043a --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/astutils/ast_utils.go @@ -0,0 +1,82 @@ +// Package astutils provides utility functions for working with AST nodes +package astutils + +import ( + "go/ast" +) + +// FuncSignatureIs returns true if the given func decl satisfies a signature characterized +// by the given name, parameters types and return types; false otherwise. 
+// +// Example: to check if a function declaration has the signature Foo(int, string) (bool,error) +// call to FuncSignatureIs(funcDecl,"Foo",[]string{"int","string"},[]string{"bool","error"}) +func FuncSignatureIs(funcDecl *ast.FuncDecl, wantName string, wantParametersTypes, wantResultsTypes []string) bool { + if wantName != funcDecl.Name.String() { + return false // func name doesn't match expected one + } + + funcParametersTypes := getTypeNames(funcDecl.Type.Params) + if len(wantParametersTypes) != len(funcParametersTypes) { + return false // func has not the expected number of parameters + } + + funcResultsTypes := getTypeNames(funcDecl.Type.Results) + if len(wantResultsTypes) != len(funcResultsTypes) { + return false // func has not the expected number of return values + } + + for i, wantType := range wantParametersTypes { + if wantType != funcParametersTypes[i] { + return false // type of a func's parameter does not match the type of the corresponding expected parameter + } + } + + for i, wantType := range wantResultsTypes { + if wantType != funcResultsTypes[i] { + return false // type of a func's return value does not match the type of the corresponding expected return value + } + } + + return true +} + +func getTypeNames(fields *ast.FieldList) []string { + result := []string{} + + if fields == nil { + return result + } + + for _, field := range fields.List { + typeName := getFieldTypeName(field.Type) + if field.Names == nil { // unnamed field + result = append(result, typeName) + continue + } + + for range field.Names { // add one type name for each field name + result = append(result, typeName) + } + } + + return result +} + +func getFieldTypeName(typ ast.Expr) string { + switch f := typ.(type) { + case *ast.Ident: + return f.Name + case *ast.SelectorExpr: + return f.Sel.Name + "." + getFieldTypeName(f.X) + case *ast.StarExpr: + return "*" + getFieldTypeName(f.X) + case *ast.IndexExpr: + return getFieldTypeName(f.X) + "[" + getFieldTypeName(f.Index) + "]" + case *ast.ArrayType: + return "[]" + getFieldTypeName(f.Elt) + case *ast.InterfaceType: + return "interface{}" + default: + return "UNHANDLED_TYPE" + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/args.go b/vendor/github.com/mgechev/revive/internal/ifelse/args.go index c6e647e69..fc65b70a3 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/args.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/args.go @@ -4,8 +4,15 @@ package ifelse // that would enlarge variable scope const PreserveScope = "preserveScope" +// AllowJump is a configuration argument that permits early-return to +// suggest introducing a new jump (return, continue, etc) statement +// to reduce nesting. By default, suggestions only bring existing jumps +// earlier. +const AllowJump = "allowJump" + // Args contains arguments common to the early-return, indent-error-flow -// and superfluous-else rules (currently just preserveScope) +// and superfluous-else rules type Args struct { PreserveScope bool + AllowJump bool } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go index 6e6036b89..dfa744e35 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/branch.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go @@ -9,8 +9,8 @@ import ( // Branch contains information about a branch within an if-else chain. type Branch struct { BranchKind - Call // The function called at the end for kind Panic or Exit. 
- HasDecls bool // The branch has one or more declarations (at the top level block) + Call // The function called at the end for kind Panic or Exit. + block []ast.Stmt } // BlockBranch gets the Branch of an ast.BlockStmt. @@ -21,7 +21,7 @@ func BlockBranch(block *ast.BlockStmt) Branch { } branch := StmtBranch(block.List[blockLen-1]) - branch.HasDecls = hasDecls(block) + branch.block = block.List return branch } @@ -61,11 +61,14 @@ func StmtBranch(stmt ast.Stmt) Branch { // String returns a brief string representation func (b Branch) String() string { switch b.BranchKind { + case Empty: + return "{ }" + case Regular: + return "{ ... }" case Panic, Exit: - return fmt.Sprintf("... %v()", b.Call) - default: - return b.BranchKind.String() + return fmt.Sprintf("{ ... %v() }", b.Call) } + return fmt.Sprintf("{ ... %v }", b.BranchKind) } // LongString returns a longer form string representation @@ -73,13 +76,13 @@ func (b Branch) LongString() string { switch b.BranchKind { case Panic, Exit: return fmt.Sprintf("call to %v function", b.Call) - default: - return b.BranchKind.LongString() } + return b.BranchKind.LongString() } -func hasDecls(block *ast.BlockStmt) bool { - for _, stmt := range block.List { +// HasDecls returns whether the branch has any top-level declarations +func (b Branch) HasDecls() bool { + for _, stmt := range b.block { switch stmt := stmt.(type) { case *ast.DeclStmt: return true @@ -91,3 +94,22 @@ func hasDecls(block *ast.BlockStmt) bool { } return false } + +// IsShort returns whether the branch is empty or consists of a single statement +func (b Branch) IsShort() bool { + switch len(b.block) { + case 0: + return true + case 1: + return isShortStmt(b.block[0]) + } + return false +} + +func isShortStmt(stmt ast.Stmt) bool { + switch stmt.(type) { + case *ast.BlockStmt, *ast.IfStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt, *ast.ForStmt, *ast.RangeStmt: + return false + } + return true +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go index 41601d1e1..75d3b0cfe 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go @@ -44,9 +44,8 @@ func (k BranchKind) Deviates() bool { return false case Return, Continue, Break, Goto, Panic, Exit: return true - default: - panic("invalid kind") } + panic("invalid kind") } // Branch returns a Branch with the given kind @@ -58,22 +57,21 @@ func (k BranchKind) String() string { case Empty: return "" case Regular: - return "..." + return "" case Return: - return "... return" + return "return" case Continue: - return "... continue" + return "continue" case Break: - return "... break" + return "break" case Goto: - return "... goto" + return "goto" case Panic: - return "... panic()" + return "panic()" case Exit: - return "... 
os.Exit()" - default: - panic("invalid kind") + return "os.Exit()" } + panic("invalid kind") } // LongString returns a longer form string representation @@ -95,7 +93,6 @@ func (k BranchKind) LongString() string { return "a function call that panics" case Exit: return "a function call that exits the program" - default: - panic("invalid kind") } + panic("invalid kind") } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/chain.go b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go index 9891635ee..e3c8898ce 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/chain.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go @@ -2,9 +2,11 @@ package ifelse // Chain contains information about an if-else chain. type Chain struct { - If Branch // what happens at the end of the "if" block - Else Branch // what happens at the end of the "else" block - HasInitializer bool // is there an "if"-initializer somewhere in the chain? - HasPriorNonDeviating bool // is there a prior "if" block that does NOT deviate control flow? - AtBlockEnd bool // whether the chain is placed at the end of the surrounding block + If Branch // what happens at the end of the "if" block + HasElse bool // is there an "else" block? + Else Branch // what happens at the end of the "else" block + HasInitializer bool // is there an "if"-initializer somewhere in the chain? + HasPriorNonDeviating bool // is there a prior "if" block that does NOT deviate control flow? + AtBlockEnd bool // whether the chain is placed at the end of the surrounding block + BlockEndKind BranchKind // control flow at end of surrounding block (e.g. "return" for function body) } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/doc.go b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go index 0aa2c9817..7461b12aa 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/doc.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go @@ -1,4 +1,4 @@ -// Package ifelse provides helpers for analysing the control flow in if-else chains, +// Package ifelse provides helpers for analyzing the control flow in if-else chains, // presently used by the following rules: // - early-return // - indent-error-flow diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/func.go b/vendor/github.com/mgechev/revive/internal/ifelse/func.go index 7ba351918..45c78f079 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/func.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/func.go @@ -42,10 +42,8 @@ func ExprCall(expr *ast.ExprStmt) (Call, bool) { // String returns the function name with package qualifier (if any) func (f Call) String() string { - switch { - case f.Pkg != "": + if f.Pkg != "" { return fmt.Sprintf("%s.%s", f.Pkg, f.Name) - default: - return f.Name } + return f.Name } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/rule.go b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go index 07ad456b6..94f022180 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/rule.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go @@ -7,10 +7,10 @@ import ( "github.com/mgechev/revive/lint" ) -// Rule is an interface for linters operating on if-else chains -type Rule interface { - CheckIfElse(chain Chain, args Args) (failMsg string) -} +// CheckFunc evaluates a rule against the given if-else chain and returns a message +// describing the proposed refactor, along with a indicator of whether such a refactor +// could be found. 
+type CheckFunc func(Chain, Args) (string, bool) // Apply evaluates the given Rule on if-else chains found within the given AST, // and returns the failures. @@ -28,11 +28,14 @@ type Rule interface { // // Only the block following "bar" is linted. This is because the rules that use this function // do not presently have anything to say about earlier blocks in the chain. -func Apply(rule Rule, node ast.Node, target Target, args lint.Arguments) []lint.Failure { - v := &visitor{rule: rule, target: target} +func Apply(check CheckFunc, node ast.Node, target Target, args lint.Arguments) []lint.Failure { + v := &visitor{check: check, target: target} for _, arg := range args { - if arg == PreserveScope { + switch arg { + case PreserveScope: v.args.PreserveScope = true + case AllowJump: + v.args.AllowJump = true } } ast.Walk(v, node) @@ -42,64 +45,99 @@ func Apply(rule Rule, node ast.Node, target Target, args lint.Arguments) []lint. type visitor struct { failures []lint.Failure target Target - rule Rule + check CheckFunc args Args } func (v *visitor) Visit(node ast.Node) ast.Visitor { - block, ok := node.(*ast.BlockStmt) - if !ok { + switch stmt := node.(type) { + case *ast.FuncDecl: + v.visitBody(stmt.Body, Return) + case *ast.FuncLit: + v.visitBody(stmt.Body, Return) + case *ast.ForStmt: + v.visitBody(stmt.Body, Continue) + case *ast.RangeStmt: + v.visitBody(stmt.Body, Continue) + case *ast.CaseClause: + v.visitBlock(stmt.Body, Break) + case *ast.BlockStmt: + v.visitBlock(stmt.List, Regular) + default: return v } + return nil +} + +func (v *visitor) visitBody(body *ast.BlockStmt, endKind BranchKind) { + if body != nil { + v.visitBlock(body.List, endKind) + } +} - for i, stmt := range block.List { - if ifStmt, ok := stmt.(*ast.IfStmt); ok { - v.visitChain(ifStmt, Chain{AtBlockEnd: i == len(block.List)-1}) +func (v *visitor) visitBlock(stmts []ast.Stmt, endKind BranchKind) { + for i, stmt := range stmts { + ifStmt, ok := stmt.(*ast.IfStmt) + if !ok { + ast.Walk(v, stmt) continue } - ast.Walk(v, stmt) + var chain Chain + if i == len(stmts)-1 { + chain.AtBlockEnd = true + chain.BlockEndKind = endKind + } + v.visitIf(ifStmt, chain) } - return nil } -func (v *visitor) visitChain(ifStmt *ast.IfStmt, chain Chain) { +func (v *visitor) visitIf(ifStmt *ast.IfStmt, chain Chain) { // look for other if-else chains nested inside this if { } block - ast.Walk(v, ifStmt.Body) - - if ifStmt.Else == nil { - // no else branch - return - } + v.visitBlock(ifStmt.Body.List, chain.BlockEndKind) if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { chain.HasInitializer = true } chain.If = BlockBranch(ifStmt.Body) + if ifStmt.Else == nil { + if v.args.AllowJump { + v.checkRule(ifStmt, chain) + } + return + } + switch elseBlock := ifStmt.Else.(type) { case *ast.IfStmt: if !chain.If.Deviates() { chain.HasPriorNonDeviating = true } - v.visitChain(elseBlock, chain) + v.visitIf(elseBlock, chain) case *ast.BlockStmt: // look for other if-else chains nested inside this else { } block - ast.Walk(v, elseBlock) + v.visitBlock(elseBlock.List, chain.BlockEndKind) + chain.HasElse = true chain.Else = BlockBranch(elseBlock) - if failMsg := v.rule.CheckIfElse(chain, v.args); failMsg != "" { - if chain.HasInitializer { - // if statement has a := initializer, so we might need to move the assignment - // onto its own line in case the body references it - failMsg += " (move short variable declaration to its own line if necessary)" - } - v.failures = append(v.failures, lint.Failure{ - Confidence: 1, - Node: 
v.target.node(ifStmt), - Failure: failMsg, - }) - } + v.checkRule(ifStmt, chain) default: - panic("invalid node type for else") + panic("unexpected node type for else") + } +} + +func (v *visitor) checkRule(ifStmt *ast.IfStmt, chain Chain) { + msg, found := v.check(chain, v.args) + if !found { + return // passed the check + } + if chain.HasInitializer { + // if statement has a := initializer, so we might need to move the assignment + // onto its own line in case the body references it + msg += " (move short variable declaration to its own line if necessary)" } + v.failures = append(v.failures, lint.Failure{ + Confidence: 1, + Node: v.target.node(ifStmt), + Failure: msg, + }) } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/target.go b/vendor/github.com/mgechev/revive/internal/ifelse/target.go index 81ff1c303..63755acf1 100644 --- a/vendor/github.com/mgechev/revive/internal/ifelse/target.go +++ b/vendor/github.com/mgechev/revive/internal/ifelse/target.go @@ -19,7 +19,6 @@ func (t Target) node(ifStmt *ast.IfStmt) ast.Node { return ifStmt case TargetElse: return ifStmt.Else - default: - panic("bad target") } + panic("bad target") } diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go index d7ecd964a..485f61833 100644 --- a/vendor/github.com/mgechev/revive/lint/config.go +++ b/vendor/github.com/mgechev/revive/lint/config.go @@ -5,7 +5,7 @@ import ( ) // Arguments is type used for the arguments of a rule. -type Arguments = []interface{} +type Arguments = []any // FileFilters is type used for modeling file filters to apply to rules. type FileFilters = []*FileFilter diff --git a/vendor/github.com/mgechev/revive/lint/failure.go b/vendor/github.com/mgechev/revive/lint/failure.go index 479b0cb48..48095f9d7 100644 --- a/vendor/github.com/mgechev/revive/lint/failure.go +++ b/vendor/github.com/mgechev/revive/lint/failure.go @@ -5,6 +5,53 @@ import ( "go/token" ) +const ( + // FailureCategoryArgOrder indicates argument order issues. + FailureCategoryArgOrder FailureCategory = "arg-order" + // FailureCategoryBadPractice indicates bad practice issues. + FailureCategoryBadPractice FailureCategory = "bad practice" + // FailureCategoryCodeStyle indicates code style issues. + FailureCategoryCodeStyle FailureCategory = "code-style" + // FailureCategoryComments indicates comment issues. + FailureCategoryComments FailureCategory = "comments" + // FailureCategoryComplexity indicates complexity issues. + FailureCategoryComplexity FailureCategory = "complexity" + // FailureCategoryContent indicates content issues. + FailureCategoryContent FailureCategory = "content" + // FailureCategoryErrors indicates error handling issues. + FailureCategoryErrors FailureCategory = "errors" + // FailureCategoryImports indicates import issues. + FailureCategoryImports FailureCategory = "imports" + // FailureCategoryLogic indicates logic issues. + FailureCategoryLogic FailureCategory = "logic" + // FailureCategoryMaintenance indicates maintenance issues. + FailureCategoryMaintenance FailureCategory = "maintenance" + // FailureCategoryNaming indicates naming issues. + FailureCategoryNaming FailureCategory = "naming" + // FailureCategoryOptimization indicates optimization issues. + FailureCategoryOptimization FailureCategory = "optimization" + // FailureCategoryStyle indicates style issues. + FailureCategoryStyle FailureCategory = "style" + // FailureCategoryTime indicates time-related issues. 
+ FailureCategoryTime FailureCategory = "time" + // FailureCategoryTypeInference indicates type inference issues. + FailureCategoryTypeInference FailureCategory = "type-inference" + // FailureCategoryUnaryOp indicates unary operation issues. + FailureCategoryUnaryOp FailureCategory = "unary-op" + // FailureCategoryUnexportedTypeInAPI indicates unexported type in API issues. + FailureCategoryUnexportedTypeInAPI FailureCategory = "unexported-type-in-api" + // FailureCategoryZeroValue indicates zero value issues. + FailureCategoryZeroValue FailureCategory = "zero-value" + + // failureCategoryInternal indicates internal failures. + failureCategoryInternal FailureCategory = "REVIVE_INTERNAL" + // failureCategoryValidity indicates validity issues. + failureCategoryValidity FailureCategory = "validity" +) + +// FailureCategory is the type for the failure categories. +type FailureCategory string + const ( // SeverityWarning declares failures of type warning SeverityWarning = "warning" @@ -25,7 +72,7 @@ type FailurePosition struct { type Failure struct { Failure string RuleName string - Category string + Category FailureCategory Position FailurePosition Node ast.Node `json:"-"` Confidence float64 @@ -37,3 +84,16 @@ type Failure struct { func (f *Failure) GetFilename() string { return f.Position.Start.Filename } + +// IsInternal returns true if this failure is internal, false otherwise. +func (f *Failure) IsInternal() bool { + return f.Category == failureCategoryInternal +} + +// NewInternalFailure yields an internal failure with the given message as failure message. +func NewInternalFailure(message string) Failure { + return Failure{ + Category: failureCategoryInternal, + Failure: message, + } +} diff --git a/vendor/github.com/mgechev/revive/lint/file.go b/vendor/github.com/mgechev/revive/lint/file.go index e34f8b7f4..0311210e5 100644 --- a/vendor/github.com/mgechev/revive/lint/file.go +++ b/vendor/github.com/mgechev/revive/lint/file.go @@ -2,6 +2,7 @@ package lint import ( "bytes" + "errors" "go/ast" "go/parser" "go/printer" @@ -48,7 +49,7 @@ func (f *File) ToPosition(pos token.Pos) token.Position { } // Render renders a node. 
-func (f *File) Render(x interface{}) string { +func (f *File) Render(x any) string { var buf bytes.Buffer if err := printer.Fprint(&buf, f.Pkg.fset, x); err != nil { panic(err) @@ -96,7 +97,7 @@ func (f *File) isMain() bool { const directiveSpecifyDisableReason = "specify-disable-reason" -func (f *File) lint(rules []Rule, config Config, failures chan Failure) { +func (f *File) lint(rules []Rule, config Config, failures chan Failure) error { rulesConfig := config.Rules _, mustSpecifyDisableReason := config.Directives[directiveSpecifyDisableReason] disabledIntervals := f.disabledIntervals(rules, mustSpecifyDisableReason, failures) @@ -107,6 +108,10 @@ func (f *File) lint(rules []Rule, config Config, failures chan Failure) { } currentFailures := currentRule.Apply(f, ruleConfig.Arguments) for idx, failure := range currentFailures { + if failure.IsInternal() { + return errors.New(failure.Failure) + } + if failure.RuleName == "" { failure.RuleName = currentRule.Name() } @@ -122,6 +127,7 @@ func (f *File) lint(rules []Rule, config Config, failures chan Failure) { } } } + return nil } type enableDisableConfig struct { @@ -140,10 +146,10 @@ const ( var re = regexp.MustCompile(directiveRE) func (f *File) disabledIntervals(rules []Rule, mustSpecifyDisableReason bool, failures chan Failure) disabledIntervalsMap { - enabledDisabledRulesMap := make(map[string][]enableDisableConfig) + enabledDisabledRulesMap := map[string][]enableDisableConfig{} getEnabledDisabledIntervals := func() disabledIntervalsMap { - result := make(disabledIntervalsMap) + result := disabledIntervalsMap{} for ruleName, disabledArr := range enabledDisabledRulesMap { ruleResult := []DisabledInterval{} @@ -191,13 +197,14 @@ func (f *File) disabledIntervals(rules []Rule, mustSpecifyDisableReason bool, fa handleRules := func(_, modifier string, isEnabled bool, line int, ruleNames []string) []DisabledInterval { var result []DisabledInterval for _, name := range ruleNames { - if modifier == "line" { + switch modifier { + case "line": handleConfig(isEnabled, line, name) handleConfig(!isEnabled, line, name) - } else if modifier == "next-line" { + case "next-line": handleConfig(isEnabled, line+1, name) handleConfig(!isEnabled, line+1, name) - } else { + default: handleConfig(isEnabled, line, name) } } @@ -260,21 +267,22 @@ func (File) filterFailures(failures []Failure, disabledIntervals disabledInterva intervals, ok := disabledIntervals[failure.RuleName] if !ok { result = append(result, failure) - } else { - include := true - for _, interval := range intervals { - intStart := interval.From.Line - intEnd := interval.To.Line - if (fStart >= intStart && fStart <= intEnd) || - (fEnd >= intStart && fEnd <= intEnd) { - include = false - break - } - } - if include { - result = append(result, failure) + continue + } + + include := true + for _, interval := range intervals { + intStart := interval.From.Line + intEnd := interval.To.Line + if (fStart >= intStart && fStart <= intEnd) || + (fEnd >= intStart && fEnd <= intEnd) { + include = false + break } } + if include { + result = append(result, failure) + } } return result } diff --git a/vendor/github.com/mgechev/revive/lint/filefilter.go b/vendor/github.com/mgechev/revive/lint/filefilter.go index 8da090b9c..fb2c9bbac 100644 --- a/vendor/github.com/mgechev/revive/lint/filefilter.go +++ b/vendor/github.com/mgechev/revive/lint/filefilter.go @@ -55,19 +55,21 @@ func (ff *FileFilter) MatchFileName(name string) bool { return ff.rx.MatchString(name) } -var fileFilterInvalidGlobRegexp = 
regexp.MustCompile(`[^/]\*\*[^/]`) -var escapeRegexSymbols = ".+{}()[]^$" +var ( + fileFilterInvalidGlobRegexp = regexp.MustCompile(`[^/]\*\*[^/]`) + escapeRegexSymbols = ".+{}()[]^$" +) func (ff *FileFilter) prepareRegexp() error { var err error - var src = ff.raw + src := ff.raw if src == "TEST" { src = "~_test\\.go" } if strings.HasPrefix(src, "~") { ff.rx, err = regexp.Compile(src[1:]) if err != nil { - return fmt.Errorf("invalid file filter [%s], regexp compile error: [%v]", ff.raw, err) + return fmt.Errorf("invalid file filter [%s], regexp compile error: [%w]", ff.raw, err) } return nil } @@ -110,7 +112,7 @@ func (ff *FileFilter) prepareRegexp() error { rxBuild.WriteByte('$') ff.rx, err = regexp.Compile(rxBuild.String()) if err != nil { - return fmt.Errorf("invalid file filter [%s], regexp compile error after glob expand: [%v]", ff.raw, err) + return fmt.Errorf("invalid file filter [%s], regexp compile error after glob expand: [%w]", ff.raw, err) } return nil } @@ -122,7 +124,7 @@ func (ff *FileFilter) prepareRegexp() error { fillRx = "^" + fillRx + "$" ff.rx, err = regexp.Compile(fillRx) if err != nil { - return fmt.Errorf("invalid file filter [%s], regexp compile full path: [%v]", ff.raw, err) + return fmt.Errorf("invalid file filter [%s], regexp compile full path: [%w]", ff.raw, err) } return nil } diff --git a/vendor/github.com/mgechev/revive/lint/linter.go b/vendor/github.com/mgechev/revive/lint/linter.go index b777f9251..73b5341bd 100644 --- a/vendor/github.com/mgechev/revive/lint/linter.go +++ b/vendor/github.com/mgechev/revive/lint/linter.go @@ -10,10 +10,10 @@ import ( "regexp" "strconv" "strings" - "sync" goversion "github.com/hashicorp/go-version" "golang.org/x/mod/modfile" + "golang.org/x/sync/errgroup" ) // ReadFile defines an abstraction for reading files. 
@@ -54,8 +54,8 @@ func (l Linter) readFile(path string) (result []byte, err error) { } var ( - genHdr = []byte("// Code generated ") - genFtr = []byte(" DO NOT EDIT.") + generatedPrefix = []byte("// Code generated ") + generatedSuffix = []byte(" DO NOT EDIT.") defaultGoVersion = goversion.Must(goversion.NewVersion("1.0")) ) @@ -63,7 +63,7 @@ var ( func (l *Linter) Lint(packages [][]string, ruleSet []Rule, config Config) (<-chan Failure, error) { failures := make(chan Failure) - perModVersions := make(map[string]*goversion.Version) + perModVersions := map[string]*goversion.Version{} perPkgVersions := make([]*goversion.Version, len(packages)) for n, files := range packages { if len(files) == 0 { @@ -101,20 +101,23 @@ func (l *Linter) Lint(packages [][]string, ruleSet []Rule, config Config) (<-cha perPkgVersions[n] = v } - var wg sync.WaitGroup + var wg errgroup.Group for n := range packages { - wg.Add(1) - go func(pkg []string, gover *goversion.Version) { + wg.Go(func() error { + pkg := packages[n] + gover := perPkgVersions[n] if err := l.lintPackage(pkg, gover, ruleSet, config, failures); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + return fmt.Errorf("error during linting: %w", err) } - wg.Done() - }(packages[n], perPkgVersions[n]) + return nil + }) } go func() { - wg.Wait() + err := wg.Wait() + if err != nil { + failures <- NewInternalFailure(err.Error()) + } close(failures) }() @@ -152,9 +155,7 @@ func (l *Linter) lintPackage(filenames []string, gover *goversion.Version, ruleS return nil } - pkg.lint(ruleSet, config, failures) - - return nil + return pkg.lint(ruleSet, config, failures) } func detectGoMod(dir string) (rootDir string, ver *goversion.Version, err error) { @@ -165,12 +166,12 @@ func detectGoMod(dir string) (rootDir string, ver *goversion.Version, err error) mod, err := os.ReadFile(modFileName) if err != nil { - return "", nil, fmt.Errorf("failed to read %q, got %v", modFileName, err) + return "", nil, fmt.Errorf("failed to read %q, got %w", modFileName, err) } modAst, err := modfile.ParseLax(modFileName, mod, nil) if err != nil { - return "", nil, fmt.Errorf("failed to parse %q, got %v", modFileName, err) + return "", nil, fmt.Errorf("failed to parse %q, got %w", modFileName, err) } if modAst.Go == nil { @@ -209,7 +210,7 @@ func isGenerated(src []byte) bool { sc := bufio.NewScanner(bytes.NewReader(src)) for sc.Scan() { b := sc.Bytes() - if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + if bytes.HasPrefix(b, generatedPrefix) && bytes.HasSuffix(b, generatedSuffix) && len(b) >= len(generatedPrefix)+len(generatedSuffix) { return true } } @@ -222,7 +223,7 @@ func addInvalidFileFailure(filename, errStr string, failures chan Failure) { failures <- Failure{ Confidence: 1, Failure: fmt.Sprintf("invalid file %s: %v", filename, errStr), - Category: "validity", + Category: failureCategoryValidity, Position: position, } } diff --git a/vendor/github.com/mgechev/revive/lint/package.go b/vendor/github.com/mgechev/revive/lint/package.go index 4a633f35a..dfc701f7e 100644 --- a/vendor/github.com/mgechev/revive/lint/package.go +++ b/vendor/github.com/mgechev/revive/lint/package.go @@ -1,6 +1,7 @@ package lint import ( + "errors" "go/ast" "go/importer" "go/token" @@ -8,7 +9,9 @@ import ( "sync" goversion "github.com/hashicorp/go-version" + "golang.org/x/sync/errgroup" + "github.com/mgechev/revive/internal/astutils" "github.com/mgechev/revive/internal/typeparams" ) @@ -31,10 +34,11 @@ type Package struct { var ( trueValue = 1 
falseValue = 2 - notSet = 3 + go115 = goversion.Must(goversion.NewVersion("1.15")) go121 = goversion.Must(goversion.NewVersion("1.21")) go122 = goversion.Must(goversion.NewVersion("1.22")) + go124 = goversion.Must(goversion.NewVersion("1.24")) ) // Files return package's files. @@ -88,21 +92,21 @@ func (p *Package) TypeCheck() error { p.Lock() defer p.Unlock() - // If type checking has already been performed - // skip it. - if p.typesInfo != nil || p.typesPkg != nil { + alreadyTypeChecked := p.typesInfo != nil || p.typesPkg != nil + if alreadyTypeChecked { return nil } + config := &types.Config{ // By setting a no-op error reporter, the type checker does as much work as possible. Error: func(error) {}, Importer: importer.Default(), } info := &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Scopes: make(map[ast.Node]*types.Scope), + Types: map[ast.Expr]types.TypeAndValue{}, + Defs: map[*ast.Ident]types.Object{}, + Uses: map[*ast.Ident]types.Object{}, + Scopes: map[ast.Node]*types.Scope{}, } var anyFile *File var astFiles []*ast.File @@ -111,6 +115,11 @@ func (p *Package) TypeCheck() error { astFiles = append(astFiles, f.AST) } + if anyFile == nil { + // this is unlikely to happen, but technically guarantees anyFile to not be nil + return errors.New("no ast.File found") + } + typesPkg, err := check(config, anyFile.AST.Name.Name, p.fset, astFiles, info) // Remember the typechecking info, even if config.Check failed, @@ -135,7 +144,7 @@ func check(config *types.Config, n string, fset *token.FileSet, astFiles []*ast. return config.Check(n, fset, astFiles, info) } -// TypeOf returns the type of an expression. +// TypeOf returns the type of expression. func (p *Package) TypeOf(expr ast.Expr) types.Type { if p.typesInfo == nil { return nil @@ -143,56 +152,53 @@ func (p *Package) TypeOf(expr ast.Expr) types.Type { return p.typesInfo.TypeOf(expr) } -type walker struct { - nmap map[string]int - has map[string]int -} +type sortableMethodsFlags int -func (w *walker) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return w - } - // TODO(dsymonds): We could check the signature to be more precise. - recv := typeparams.ReceiverType(fn) - if i, ok := w.nmap[fn.Name.Name]; ok { - w.has[recv] |= i - } - return w -} +// flags for sortable interface methods. +const ( + bfLen sortableMethodsFlags = 1 << iota + bfLess + bfSwap +) func (p *Package) scanSortable() { - p.sortable = make(map[string]bool) - - // bitfield for which methods exist on each type. 
- const ( - bfLen = 1 << iota - bfLess - bfSwap - ) - nmap := map[string]int{"Len": bfLen, "Less": bfLess, "Swap": bfSwap} - has := make(map[string]int) + sortableFlags := map[string]sortableMethodsFlags{} for _, f := range p.files { - ast.Walk(&walker{nmap, has}, f.AST) + for _, decl := range f.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + isAMethodDeclaration := ok && fn.Recv != nil && len(fn.Recv.List) != 0 + if !isAMethodDeclaration { + continue + } + + recvType := typeparams.ReceiverType(fn) + sortableFlags[recvType] |= getSortableMethodFlagForFunction(fn) + } } - for typ, ms := range has { + + p.sortable = make(map[string]bool, len(sortableFlags)) + for typ, ms := range sortableFlags { if ms == bfLen|bfLess|bfSwap { p.sortable[typ] = true } } } -func (p *Package) lint(rules []Rule, config Config, failures chan Failure) { +func (p *Package) lint(rules []Rule, config Config, failures chan Failure) error { p.scanSortable() - var wg sync.WaitGroup + var eg errgroup.Group for _, file := range p.files { - wg.Add(1) - go (func(file *File) { - file.lint(rules, config, failures) - wg.Done() - })(file) + eg.Go(func() error { + return file.lint(rules, config, failures) + }) } - wg.Wait() + + return eg.Wait() +} + +// IsAtLeastGo115 returns true if the Go version for this package is 1.15 or higher, false otherwise +func (p *Package) IsAtLeastGo115() bool { + return p.goVersion.GreaterThanOrEqual(go115) } // IsAtLeastGo121 returns true if the Go version for this package is 1.21 or higher, false otherwise @@ -204,3 +210,21 @@ func (p *Package) IsAtLeastGo121() bool { func (p *Package) IsAtLeastGo122() bool { return p.goVersion.GreaterThanOrEqual(go122) } + +// IsAtLeastGo124 returns true if the Go version for this package is 1.24 or higher, false otherwise +func (p *Package) IsAtLeastGo124() bool { + return p.goVersion.GreaterThanOrEqual(go124) +} + +func getSortableMethodFlagForFunction(fn *ast.FuncDecl) sortableMethodsFlags { + switch { + case astutils.FuncSignatureIs(fn, "Len", []string{}, []string{"int"}): + return bfLen + case astutils.FuncSignatureIs(fn, "Less", []string{"int", "int"}, []string{"bool"}): + return bfLess + case astutils.FuncSignatureIs(fn, "Swap", []string{"int", "int"}, []string{}): + return bfSwap + default: + return 0 + } +} diff --git a/vendor/github.com/mgechev/revive/lint/rule.go b/vendor/github.com/mgechev/revive/lint/rule.go index ccc66691c..cc424e96a 100644 --- a/vendor/github.com/mgechev/revive/lint/rule.go +++ b/vendor/github.com/mgechev/revive/lint/rule.go @@ -17,9 +17,9 @@ type Rule interface { Apply(*File, Arguments) []Failure } -// AbstractRule defines an abstract rule. -type AbstractRule struct { - Failures []Failure +// ConfigurableRule defines an abstract configurable rule interface. +type ConfigurableRule interface { + Configure(Arguments) error } // ToFailurePosition returns the failure position. diff --git a/vendor/github.com/mgechev/revive/rule/add_constant.go b/vendor/github.com/mgechev/revive/rule/add_constant.go index 399382c8b..c58c369e6 100644 --- a/vendor/github.com/mgechev/revive/rule/add_constant.go +++ b/vendor/github.com/mgechev/revive/rule/add_constant.go @@ -1,12 +1,12 @@ package rule import ( + "errors" "fmt" "go/ast" "regexp" "strconv" "strings" - "sync" "github.com/mgechev/revive/lint" ) @@ -31,19 +31,15 @@ func (wl allowList) add(kind, list string) { } } -// AddConstantRule lints unused params in functions. +// AddConstantRule suggests using constants instead of magic numbers and string literals. 
type AddConstantRule struct { allowList allowList ignoreFunctions []*regexp.Regexp strLitLimit int - - configureOnce sync.Once } // Apply applies the rule to given file. -func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *AddConstantRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { @@ -52,11 +48,11 @@ func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lin w := &lintAddConstantRule{ onFailure: onFailure, - strLits: make(map[string]int), + strLits: map[string]int{}, strLitLimit: r.strLitLimit, allowList: r.allowList, ignoreFunctions: r.ignoreFunctions, - structTags: make(map[*ast.BasicLit]struct{}), + structTags: map[*ast.BasicLit]struct{}{}, } ast.Walk(w, file.AST) @@ -175,7 +171,7 @@ func (w *lintAddConstantRule) checkStrLit(n *ast.BasicLit) { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: fmt.Sprintf("string literal %s appears, at least, %d times, create a named constant for it", n.Value, w.strLits[n.Value]), }) w.strLits[n.Value] = -1 // mark it to avoid failing again on the same literal @@ -191,7 +187,7 @@ func (w *lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: fmt.Sprintf("avoid magic numbers like '%s', create a named constant for it", n.Value), }) } @@ -201,67 +197,71 @@ func (w *lintAddConstantRule) isStructTag(n *ast.BasicLit) bool { return ok } -func (r *AddConstantRule) configure(arguments lint.Arguments) { - if r.allowList == nil { - r.strLitLimit = defaultStrLitLimit - r.allowList = newAllowList() - if len(arguments) > 0 { - args, ok := arguments[0].(map[string]any) +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *AddConstantRule) Configure(arguments lint.Arguments) error { + r.strLitLimit = defaultStrLitLimit + r.allowList = newAllowList() + if len(arguments) == 0 { + return nil + } + args, ok := arguments[0].(map[string]any) + if !ok { + return fmt.Errorf("invalid argument to the add-constant rule, expecting a k,v map. Got %T", arguments[0]) + } + for k, v := range args { + kind := "" + switch k { + case "allowFloats": + kind = kindFLOAT + fallthrough + case "allowInts": + if kind == "" { + kind = kindINT + } + fallthrough + case "allowStrs": + if kind == "" { + kind = kindSTRING + } + list, ok := v.(string) + if !ok { + return fmt.Errorf("invalid argument to the add-constant rule, string expected. Got '%v' (%T)", v, v) + } + r.allowList.add(kind, list) + case "maxLitCount": + sl, ok := v.(string) + if !ok { + return fmt.Errorf("invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v' (%T)", v, v) + } + + limit, err := strconv.Atoi(sl) + if err != nil { + return fmt.Errorf("invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v'", v) + } + r.strLitLimit = limit + case "ignoreFuncs": + excludes, ok := v.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the add-constant rule. Expecting a k,v map, got %T", arguments[0])) + return fmt.Errorf("invalid argument to the ignoreFuncs parameter of add-constant rule, string expected. 
Got '%v' (%T)", v, v) } - for k, v := range args { - kind := "" - switch k { - case "allowFloats": - kind = kindFLOAT - fallthrough - case "allowInts": - if kind == "" { - kind = kindINT - } - fallthrough - case "allowStrs": - if kind == "" { - kind = kindSTRING - } - list, ok := v.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the add-constant rule, string expected. Got '%v' (%T)", v, v)) - } - r.allowList.add(kind, list) - case "maxLitCount": - sl, ok := v.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v' (%T)", v, v)) - } - - limit, err := strconv.Atoi(sl) - if err != nil { - panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v'", v)) - } - r.strLitLimit = limit - case "ignoreFuncs": - excludes, ok := v.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule, string expected. Got '%v' (%T)", v, v)) - } - - for _, exclude := range strings.Split(excludes, ",") { - exclude = strings.Trim(exclude, " ") - if exclude == "" { - panic("Invalid argument to the ignoreFuncs parameter of add-constant rule, expected regular expression must not be empty.") - } - - exp, err := regexp.Compile(exclude) - if err != nil { - panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule: regexp %q does not compile: %v", exclude, err)) - } - - r.ignoreFunctions = append(r.ignoreFunctions, exp) - } + + for _, exclude := range strings.Split(excludes, ",") { + exclude = strings.Trim(exclude, " ") + if exclude == "" { + return errors.New("invalid argument to the ignoreFuncs parameter of add-constant rule, expected regular expression must not be empty") } + + exp, err := regexp.Compile(exclude) + if err != nil { + return fmt.Errorf("invalid argument to the ignoreFuncs parameter of add-constant rule: regexp %q does not compile: %w", exclude, err) + } + + r.ignoreFunctions = append(r.ignoreFunctions, exp) } } } + + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/argument_limit.go b/vendor/github.com/mgechev/revive/rule/argument_limit.go index b4d56de0e..7fd6a382d 100644 --- a/vendor/github.com/mgechev/revive/rule/argument_limit.go +++ b/vendor/github.com/mgechev/revive/rule/argument_limit.go @@ -1,84 +1,67 @@ package rule import ( + "errors" "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) -// ArgumentsLimitRule lints given else constructs. +// ArgumentsLimitRule lints the number of arguments a function can receive. type ArgumentsLimitRule struct { max int - - configureOnce sync.Once } const defaultArgumentsLimit = 8 -func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ArgumentsLimitRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.max = defaultArgumentsLimit - return + return nil } maxArguments, ok := arguments[0].(int64) // Alt. non panicking version if !ok { - panic(`invalid value passed as argument number to the "argument-limit" rule`) + return errors.New(`invalid value passed as argument number to the "argument-limit" rule`) } r.max = int(maxArguments) + return nil } // Apply applies the rule to given file. 
-func (r *ArgumentsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *ArgumentsLimitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - walker := lintArgsNum{ - max: r.max, - onFailure: onFailure, - } - - ast.Walk(walker, file.AST) - - return failures -} - -// Name returns the rule name. -func (*ArgumentsLimitRule) Name() string { - return "argument-limit" -} -type lintArgsNum struct { - max int - onFailure func(lint.Failure) -} + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } -func (w lintArgsNum) Visit(n ast.Node) ast.Visitor { - node, ok := n.(*ast.FuncDecl) - if !ok { - return w - } + numParams := 0 + for _, l := range funcDecl.Type.Params.List { + numParams += len(l.Names) + } - num := 0 - for _, l := range node.Type.Params.List { - for range l.Names { - num++ + if numParams <= r.max { + continue } - } - if num > w.max { - w.onFailure(lint.Failure{ + failures = append(failures, lint.Failure{ Confidence: 1, - Failure: fmt.Sprintf("maximum number of arguments per function exceeded; max %d but got %d", w.max, num), - Node: node.Type, + Failure: fmt.Sprintf("maximum number of arguments per function exceeded; max %d but got %d", r.max, numParams), + Node: funcDecl.Type, }) } - return nil // skip visiting the body of the function + return failures +} + +// Name returns the rule name. +func (*ArgumentsLimitRule) Name() string { + return "argument-limit" } diff --git a/vendor/github.com/mgechev/revive/rule/atomic.go b/vendor/github.com/mgechev/revive/rule/atomic.go index 287b28c21..61219765f 100644 --- a/vendor/github.com/mgechev/revive/rule/atomic.go +++ b/vendor/github.com/mgechev/revive/rule/atomic.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// AtomicRule lints given else constructs. +// AtomicRule lints usages of the `sync/atomic` package. type AtomicRule struct{} // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/banned_characters.go b/vendor/github.com/mgechev/revive/rule/banned_characters.go index 926b32c21..7eb026b03 100644 --- a/vendor/github.com/mgechev/revive/rule/banned_characters.go +++ b/vendor/github.com/mgechev/revive/rule/banned_characters.go @@ -4,7 +4,6 @@ import ( "fmt" "go/ast" "strings" - "sync" "github.com/mgechev/revive/lint" ) @@ -12,23 +11,31 @@ import ( // BannedCharsRule checks if a file contains banned characters. type BannedCharsRule struct { bannedCharList []string - - configureOnce sync.Once } const bannedCharsRuleName = "banned-characters" -func (r *BannedCharsRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *BannedCharsRule) Configure(arguments lint.Arguments) error { if len(arguments) > 0 { - checkNumberOfArguments(1, arguments, bannedCharsRuleName) - r.bannedCharList = r.getBannedCharsList(arguments) + err := checkNumberOfArguments(1, arguments, bannedCharsRuleName) + if err != nil { + return err + } + list, err := r.getBannedCharsList(arguments) + if err != nil { + return err + } + + r.bannedCharList = list } + return nil } // Apply applied the rule to the given file. 
-func (r *BannedCharsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *BannedCharsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) @@ -49,17 +56,17 @@ func (*BannedCharsRule) Name() string { } // getBannedCharsList converts arguments into the banned characters list -func (r *BannedCharsRule) getBannedCharsList(args lint.Arguments) []string { +func (r *BannedCharsRule) getBannedCharsList(args lint.Arguments) ([]string, error) { var bannedChars []string for _, char := range args { charStr, ok := char.(string) if !ok { - panic(fmt.Sprintf("Invalid argument for the %s rule: expecting a string, got %T", r.Name(), char)) + return nil, fmt.Errorf("invalid argument for the %s rule: expecting a string, got %T", r.Name(), char) } bannedChars = append(bannedChars, charStr) } - return bannedChars + return bannedChars, nil } type lintBannedCharsRule struct { diff --git a/vendor/github.com/mgechev/revive/rule/bare_return.go b/vendor/github.com/mgechev/revive/rule/bare_return.go index 147fa84db..c5a9441f6 100644 --- a/vendor/github.com/mgechev/revive/rule/bare_return.go +++ b/vendor/github.com/mgechev/revive/rule/bare_return.go @@ -6,7 +6,7 @@ import ( "github.com/mgechev/revive/lint" ) -// BareReturnRule lints given else constructs. +// BareReturnRule lints bare returns. type BareReturnRule struct{} // Apply applies the rule to given file. @@ -49,7 +49,7 @@ func (w lintBareReturnRule) checkFunc(results *ast.FieldList, body *ast.BlockStm return // nothing to do } - brf := bareReturnFinder{w.onFailure} + brf := bareReturnFinder(w) ast.Walk(brf, body) } @@ -60,8 +60,8 @@ type bareReturnFinder struct { func (w bareReturnFinder) Visit(node ast.Node) ast.Visitor { _, ok := node.(*ast.FuncLit) if ok { - // skip analysing function literals - // they will be analysed by the lintBareReturnRule.Visit method + // skip analyzing function literals + // they will be analyzed by the lintBareReturnRule.Visit method return nil } diff --git a/vendor/github.com/mgechev/revive/rule/blank_imports.go b/vendor/github.com/mgechev/revive/rule/blank_imports.go index 0ddb4aad2..b3f7a3cdc 100644 --- a/vendor/github.com/mgechev/revive/rule/blank_imports.go +++ b/vendor/github.com/mgechev/revive/rule/blank_imports.go @@ -7,7 +7,7 @@ import ( "github.com/mgechev/revive/lint" ) -// BlankImportsRule lints given else constructs. +// BlankImportsRule lints blank imports. type BlankImportsRule struct{} // Name returns the rule name. @@ -23,7 +23,6 @@ func (r *BlankImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failu const ( message = "a blank import should be only in a main or test package, or have a comment justifying it" - category = "imports" embedImportPath = `"embed"` ) @@ -55,7 +54,7 @@ func (r *BlankImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failu // This is the first blank import of a group. if imp.Doc == nil && imp.Comment == nil { - failures = append(failures, lint.Failure{Failure: message, Category: category, Node: imp, Confidence: 1}) + failures = append(failures, lint.Failure{Failure: message, Category: lint.FailureCategoryImports, Node: imp, Confidence: 1}) } } @@ -73,3 +72,7 @@ func (*BlankImportsRule) fileHasValidEmbedComment(fileAst *ast.File) bool { return false } + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. 
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } diff --git a/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go b/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go index 71551e55a..dd1e9be87 100644 --- a/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go +++ b/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go @@ -53,16 +53,16 @@ func (w *lintBoolLiteral) Visit(node ast.Node) ast.Visitor { isConstant := (n.Op == token.LAND && lexeme == "false") || (n.Op == token.LOR && lexeme == "true") if isConstant { - w.addFailure(n, "Boolean expression seems to always evaluate to "+lexeme, "logic") + w.addFailure(n, "Boolean expression seems to always evaluate to "+lexeme, lint.FailureCategoryLogic) } else { - w.addFailure(n, "omit Boolean literal in expression", "style") + w.addFailure(n, "omit Boolean literal in expression", lint.FailureCategoryStyle) } } return w } -func (w lintBoolLiteral) addFailure(node ast.Node, msg, cat string) { +func (w lintBoolLiteral) addFailure(node ast.Node, msg string, cat lint.FailureCategory) { w.onFailure(lint.Failure{ Confidence: 1, Node: node, @@ -70,3 +70,23 @@ func (w lintBoolLiteral) addFailure(node ast.Node, msg, cat string) { Failure: msg, }) } + +// isBoolOp returns true if the given token corresponds to a bool operator. +func isBoolOp(t token.Token) bool { + switch t { + case token.LAND, token.LOR, token.EQL, token.NEQ: + return true + } + + return false +} + +func isExprABooleanLit(n ast.Node) (lexeme string, ok bool) { + oper, ok := n.(*ast.Ident) + + if !ok { + return "", false + } + + return oper.Name, oper.Name == "true" || oper.Name == "false" +} diff --git a/vendor/github.com/mgechev/revive/rule/call_to_gc.go b/vendor/github.com/mgechev/revive/rule/call_to_gc.go index 9c68380a4..c3eb1bb71 100644 --- a/vendor/github.com/mgechev/revive/rule/call_to_gc.go +++ b/vendor/github.com/mgechev/revive/rule/call_to_gc.go @@ -62,7 +62,7 @@ func (w lintCallToGC) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 1, Node: node, - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Failure: "explicit call to the garbage collector", }) diff --git a/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go b/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go index ecde3882e..0c95c544f 100644 --- a/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go +++ b/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go @@ -4,39 +4,38 @@ import ( "fmt" "go/ast" "go/token" - "sync" "github.com/mgechev/revive/lint" "golang.org/x/tools/go/ast/astutil" ) -// CognitiveComplexityRule lints given else constructs. +// CognitiveComplexityRule sets restriction for maximum cognitive complexity. type CognitiveComplexityRule struct { maxComplexity int - - configureOnce sync.Once } const defaultMaxCognitiveComplexity = 7 -func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. 
+func (r *CognitiveComplexityRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.maxComplexity = defaultMaxCognitiveComplexity - return + return nil } complexity, ok := arguments[0].(int64) if !ok { - panic(fmt.Sprintf("invalid argument type for cognitive-complexity, expected int64, got %T", arguments[0])) + return fmt.Errorf("invalid argument type for cognitive-complexity, expected int64, got %T", arguments[0]) } r.maxComplexity = int(complexity) + return nil } // Apply applies the rule to given file. -func (r *CognitiveComplexityRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *CognitiveComplexityRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure linter := cognitiveComplexityLinter{ @@ -67,12 +66,14 @@ func (w cognitiveComplexityLinter) lintCognitiveComplexity() { f := w.file for _, decl := range f.AST.Decls { if fn, ok := decl.(*ast.FuncDecl); ok && fn.Body != nil { - v := cognitiveComplexityVisitor{} + v := cognitiveComplexityVisitor{ + name: fn.Name, + } c := v.subTreeComplexity(fn.Body) if c > w.maxComplexity { w.onFailure(lint.Failure{ Confidence: 1, - Category: "maintenance", + Category: lint.FailureCategoryMaintenance, Failure: fmt.Sprintf("function %s has cognitive complexity %d (> max enabled %d)", funcName(fn), c, w.maxComplexity), Node: fn, }) @@ -82,13 +83,14 @@ func (w cognitiveComplexityLinter) lintCognitiveComplexity() { } type cognitiveComplexityVisitor struct { + name *ast.Ident complexity int nestingLevel int } // subTreeComplexity calculates the cognitive complexity of an AST-subtree. -func (v cognitiveComplexityVisitor) subTreeComplexity(n ast.Node) int { - ast.Walk(&v, n) +func (v *cognitiveComplexityVisitor) subTreeComplexity(n ast.Node) int { + ast.Walk(v, n) return v.complexity } @@ -120,13 +122,20 @@ func (v *cognitiveComplexityVisitor) Visit(n ast.Node) ast.Visitor { return nil case *ast.BinaryExpr: v.complexity += v.binExpComplexity(n) - return nil // skip visiting binexp sub-tree (already visited by binExpComplexity) + return nil // skip visiting binexp subtree (already visited by binExpComplexity) case *ast.BranchStmt: if n.Label != nil { v.complexity++ } + case *ast.CallExpr: + if ident, ok := n.Fun.(*ast.Ident); ok { + if ident.Obj == v.name.Obj && ident.Name == v.name.Name { + // called by same function directly (direct recursion) + v.complexity++ + return nil + } + } } - // TODO handle (at least) direct recursion return v } @@ -147,7 +156,7 @@ func (v *cognitiveComplexityVisitor) walk(complexityIncrement int, targets ...as v.nestingLevel = nesting } -func (cognitiveComplexityVisitor) binExpComplexity(n *ast.BinaryExpr) int { +func (*cognitiveComplexityVisitor) binExpComplexity(n *ast.BinaryExpr) int { calculator := binExprComplexityCalculator{opsStack: []token.Token{}} astutil.Apply(n, calculator.pre, calculator.post) diff --git a/vendor/github.com/mgechev/revive/rule/comment_spacings.go b/vendor/github.com/mgechev/revive/rule/comment_spacings.go index 7bdc0e71d..5187bb218 100644 --- a/vendor/github.com/mgechev/revive/rule/comment_spacings.go +++ b/vendor/github.com/mgechev/revive/rule/comment_spacings.go @@ -3,34 +3,33 @@ package rule import ( "fmt" "strings" - "sync" "github.com/mgechev/revive/lint" ) -// CommentSpacingsRule check the whether there is a space between +// CommentSpacingsRule check whether there is a space between // the comment symbol( // ) and the start of the comment text type 
CommentSpacingsRule struct { allowList []string - - configureOnce sync.Once } -func (r *CommentSpacingsRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *CommentSpacingsRule) Configure(arguments lint.Arguments) error { r.allowList = []string{} for _, arg := range arguments { allow, ok := arg.(string) // Alt. non panicking version if !ok { - panic(fmt.Sprintf("invalid argument %v for %s; expected string but got %T", arg, r.Name(), arg)) + return fmt.Errorf("invalid argument %v for %s; expected string but got %T", arg, r.Name(), arg) } r.allowList = append(r.allowList, `//`+allow) } + return nil } // Apply the rule. -func (r *CommentSpacingsRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *CommentSpacingsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure for _, cg := range file.AST.Comments { @@ -58,7 +57,7 @@ func (r *CommentSpacingsRule) Apply(file *lint.File, args lint.Arguments) []lint failures = append(failures, lint.Failure{ Node: comment, Confidence: 1, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "no space between comment delimiter and comment text", }) } diff --git a/vendor/github.com/mgechev/revive/rule/comments_density.go b/vendor/github.com/mgechev/revive/rule/comments_density.go index f2382b1f0..e83c20add 100644 --- a/vendor/github.com/mgechev/revive/rule/comments_density.go +++ b/vendor/github.com/mgechev/revive/rule/comments_density.go @@ -4,37 +4,36 @@ import ( "fmt" "go/ast" "strings" - "sync" "github.com/mgechev/revive/lint" ) -// CommentsDensityRule lints given else constructs. +// CommentsDensityRule enforces a minimum comment / code relation. type CommentsDensityRule struct { minimumCommentsDensity int64 - - configureOnce sync.Once } const defaultMinimumCommentsPercentage = 0 -func (r *CommentsDensityRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *CommentsDensityRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.minimumCommentsDensity = defaultMinimumCommentsPercentage - return + return nil } var ok bool r.minimumCommentsDensity, ok = arguments[0].(int64) if !ok { - panic(fmt.Sprintf("invalid argument for %q rule: argument should be an int, got %T", r.Name(), arguments[0])) + return fmt.Errorf("invalid argument for %q rule: argument should be an int, got %T", r.Name(), arguments[0]) } + return nil } // Apply applies the rule to given file. 
-func (r *CommentsDensityRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *CommentsDensityRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { commentsLines := countDocLines(file.AST.Comments) statementsCount := countStatements(file.AST) density := (float32(commentsLines) / float32(statementsCount+commentsLines)) * 100 diff --git a/vendor/github.com/mgechev/revive/rule/confusing_naming.go b/vendor/github.com/mgechev/revive/rule/confusing_naming.go index 32f6dd803..8a8ea13f8 100644 --- a/vendor/github.com/mgechev/revive/rule/confusing_naming.go +++ b/vendor/github.com/mgechev/revive/rule/confusing_naming.go @@ -35,7 +35,7 @@ func (ps *packages) methodNames(lp *lint.Package) pkgMethods { } } - pkgm := pkgMethods{pkg: lp, methods: make(map[string]map[string]*referenceMethod), mu: &sync.Mutex{}} + pkgm := pkgMethods{pkg: lp, methods: map[string]map[string]*referenceMethod{}, mu: &sync.Mutex{}} ps.pkgs = append(ps.pkgs, pkgm) return pkgm @@ -102,7 +102,7 @@ func checkMethodName(holder string, id *ast.Ident, w *lintConfusingNames) { Failure: fmt.Sprintf("Method '%s' differs only by capitalization to %s '%s' in %s", id.Name, kind, refMethod.id.Name, fileName), Confidence: 1, Node: id, - Category: "naming", + Category: lint.FailureCategoryNaming, }) return @@ -176,7 +176,7 @@ func checkStructFields(fields *ast.FieldList, structName string, w *lintConfusin Failure: fmt.Sprintf("Field '%s' differs only by capitalization to other field in the struct type %s", id.Name, structName), Confidence: 1, Node: id, - Category: "naming", + Category: lint.FailureCategoryNaming, }) } else { bl[normName] = true diff --git a/vendor/github.com/mgechev/revive/rule/confusing_results.go b/vendor/github.com/mgechev/revive/rule/confusing_results.go index 1b79ada9c..1be16f399 100644 --- a/vendor/github.com/mgechev/revive/rule/confusing_results.go +++ b/vendor/github.com/mgechev/revive/rule/confusing_results.go @@ -13,54 +13,42 @@ type ConfusingResultsRule struct{} func (*ConfusingResultsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - fileAst := file.AST - walker := lintConfusingResults{ - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } - - ast.Walk(walker, fileAst) + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) - return failures -} + isFunctionWithMoreThanOneResult := ok && funcDecl.Type.Results != nil && len(funcDecl.Type.Results.List) > 1 + if !isFunctionWithMoreThanOneResult { + continue + } -// Name returns the rule name. 
-func (*ConfusingResultsRule) Name() string { - return "confusing-results" -} + resultsAreNamed := len(funcDecl.Type.Results.List[0].Names) > 0 + if resultsAreNamed { + continue + } -type lintConfusingResults struct { - onFailure func(lint.Failure) -} + lastType := "" + for _, result := range funcDecl.Type.Results.List { + resultTypeName := gofmt(result.Type) -func (w lintConfusingResults) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Type.Results == nil || len(fn.Type.Results.List) < 2 { - return w - } - lastType := "" - for _, result := range fn.Type.Results.List { - if len(result.Names) > 0 { - return w - } + if resultTypeName == lastType { + failures = append(failures, lint.Failure{ + Node: result, + Confidence: 1, + Category: lint.FailureCategoryNaming, + Failure: "unnamed results of the same type may be confusing, consider using named results", + }) - t, ok := result.Type.(*ast.Ident) - if !ok { - return w - } + break + } - if t.Name == lastType { - w.onFailure(lint.Failure{ - Node: n, - Confidence: 1, - Category: "naming", - Failure: "unnamed results of the same type may be confusing, consider using named results", - }) - break + lastType = resultTypeName } - lastType = t.Name } - return w + return failures +} + +// Name returns the rule name. +func (*ConfusingResultsRule) Name() string { + return "confusing-results" } diff --git a/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go b/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go index 9e34d3d16..cb5dd746d 100644 --- a/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go +++ b/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go @@ -91,11 +91,11 @@ func (*lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { return false } -func (w lintConstantLogicalExpr) newFailure(node ast.Node, msg string) { +func (w *lintConstantLogicalExpr) newFailure(node ast.Node, msg string) { w.onFailure(lint.Failure{ Confidence: 1, Node: node, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: msg, }) } diff --git a/vendor/github.com/mgechev/revive/rule/context_as_argument.go b/vendor/github.com/mgechev/revive/rule/context_as_argument.go index 8bc5f8b61..f6a1d8ca4 100644 --- a/vendor/github.com/mgechev/revive/rule/context_as_argument.go +++ b/vendor/github.com/mgechev/revive/rule/context_as_argument.go @@ -4,37 +4,49 @@ import ( "fmt" "go/ast" "strings" - "sync" "github.com/mgechev/revive/lint" ) -// ContextAsArgumentRule lints given else constructs. +// ContextAsArgumentRule suggests that `context.Context` should be the first argument of a function. type ContextAsArgumentRule struct { - allowTypesLUT map[string]struct{} - - configureOnce sync.Once + allowTypes map[string]struct{} } // Apply applies the rule to given file. 
-func (r *ContextAsArgumentRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *ContextAsArgumentRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - walker := lintContextArguments{ - allowTypesLUT: r.allowTypesLUT, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + continue // not a function or a function with less than 2 parameters + } - ast.Walk(walker, file.AST) + fnArgs := fn.Type.Params.List + + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + isCtxStillAllowed := true + for _, arg := range fnArgs { + argIsCtx := isPkgDot(arg.Type, "context", "Context") + if argIsCtx && !isCtxStillAllowed { + failures = append(failures, lint.Failure{ + Node: arg, + Category: lint.FailureCategoryArgOrder, + Failure: "context.Context should be the first parameter of a function", + Confidence: 0.9, + }) + + break // only flag one + } - return failures -} + typeName := gofmt(arg.Type) + // a parameter of type context.Context is still allowed if the current arg type is in the allow types LookUpTable + _, isCtxStillAllowed = r.allowTypes[typeName] + } + } -func (r *ContextAsArgumentRule) configure(arguments lint.Arguments) { - r.allowTypesLUT = getAllowTypesFromArguments(arguments) + return failures } // Name returns the rule name. @@ -42,59 +54,35 @@ func (*ContextAsArgumentRule) Name() string { return "context-as-argument" } -type lintContextArguments struct { - allowTypesLUT map[string]struct{} - onFailure func(lint.Failure) -} - -func (w lintContextArguments) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok || len(fn.Type.Params.List) <= 1 { - return w +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ContextAsArgumentRule) Configure(arguments lint.Arguments) error { + types, err := r.getAllowTypesFromArguments(arguments) + if err != nil { + return err } - - fnArgs := fn.Type.Params.List - - // A context.Context should be the first parameter of a function. - // Flag any that show up after the first. - isCtxStillAllowed := true - for _, arg := range fnArgs { - argIsCtx := isPkgDot(arg.Type, "context", "Context") - if argIsCtx && !isCtxStillAllowed { - w.onFailure(lint.Failure{ - Node: arg, - Category: "arg-order", - Failure: "context.Context should be the first parameter of a function", - Confidence: 0.9, - }) - break // only flag one - } - - typeName := gofmt(arg.Type) - // a parameter of type context.Context is still allowed if the current arg type is in the LUT - _, isCtxStillAllowed = w.allowTypesLUT[typeName] - } - - return nil // avoid visiting the function body + r.allowTypes = types + return nil } -func getAllowTypesFromArguments(args lint.Arguments) map[string]struct{} { +func (*ContextAsArgumentRule) getAllowTypesFromArguments(args lint.Arguments) (map[string]struct{}, error) { allowTypesBefore := []string{} if len(args) >= 1 { argKV, ok := args[0].(map[string]any) if !ok { - panic(fmt.Sprintf("Invalid argument to the context-as-argument rule. Expecting a k,v map, got %T", args[0])) + return nil, fmt.Errorf("invalid argument to the context-as-argument rule. 
Expecting a k,v map, got %T", args[0]) } for k, v := range argKV { switch k { case "allowTypesBefore": typesBefore, ok := v.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the context-as-argument.allowTypesBefore rule. Expecting a string, got %T", v)) + return nil, fmt.Errorf("invalid argument to the context-as-argument.allowTypesBefore rule. Expecting a string, got %T", v) } allowTypesBefore = append(allowTypesBefore, strings.Split(typesBefore, ",")...) default: - panic(fmt.Sprintf("Invalid argument to the context-as-argument rule. Unrecognized key %s", k)) + return nil, fmt.Errorf("invalid argument to the context-as-argument rule. Unrecognized key %s", k) } } } @@ -105,5 +93,5 @@ func getAllowTypesFromArguments(args lint.Arguments) map[string]struct{} { } result["context.Context"] = struct{}{} // context.Context is always allowed before another context.Context - return result + return result, nil } diff --git a/vendor/github.com/mgechev/revive/rule/context_keys_type.go b/vendor/github.com/mgechev/revive/rule/context_keys_type.go index 60ccec560..02e1f9fa8 100644 --- a/vendor/github.com/mgechev/revive/rule/context_keys_type.go +++ b/vendor/github.com/mgechev/revive/rule/context_keys_type.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// ContextKeysType lints given else constructs. +// ContextKeysType disallows the usage of basic types in `context.WithValue`. type ContextKeysType struct{} // Apply applies the rule to given file. @@ -74,7 +74,7 @@ func checkContextKeyType(w lintContextKeyTypes, x *ast.CallExpr) { w.onFailure(lint.Failure{ Confidence: 1, Node: x, - Category: "content", + Category: lint.FailureCategoryContent, Failure: fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type), }) } diff --git a/vendor/github.com/mgechev/revive/rule/cyclomatic.go b/vendor/github.com/mgechev/revive/rule/cyclomatic.go index c1a2de97a..088c45c85 100644 --- a/vendor/github.com/mgechev/revive/rule/cyclomatic.go +++ b/vendor/github.com/mgechev/revive/rule/cyclomatic.go @@ -4,87 +4,63 @@ import ( "fmt" "go/ast" "go/token" - "sync" "github.com/mgechev/revive/lint" ) // Based on https://github.com/fzipp/gocyclo -// CyclomaticRule lints given else constructs. +// CyclomaticRule sets restriction for maximum cyclomatic complexity. type CyclomaticRule struct { maxComplexity int - - configureOnce sync.Once } const defaultMaxCyclomaticComplexity = 10 -func (r *CyclomaticRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *CyclomaticRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.maxComplexity = defaultMaxCyclomaticComplexity - return + return nil } complexity, ok := arguments[0].(int64) // Alt. non panicking version if !ok { - panic(fmt.Sprintf("invalid argument for cyclomatic complexity; expected int but got %T", arguments[0])) + return fmt.Errorf("invalid argument for cyclomatic complexity; expected int but got %T", arguments[0]) } r.maxComplexity = int(complexity) + return nil } // Apply applies the rule to given file. 
-func (r *CyclomaticRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *CyclomaticRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - fileAst := file.AST - - walker := lintCyclomatic{ - file: file, - complexity: r.maxComplexity, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } - - ast.Walk(walker, fileAst) - - return failures -} - -// Name returns the rule name. -func (*CyclomaticRule) Name() string { - return "cyclomatic" -} - -type lintCyclomatic struct { - file *lint.File - complexity int - onFailure func(lint.Failure) -} - -func (w lintCyclomatic) Visit(_ ast.Node) ast.Visitor { - f := w.file - for _, decl := range f.AST.Decls { + for _, decl := range file.AST.Decls { fn, ok := decl.(*ast.FuncDecl) if !ok { continue } c := complexity(fn) - if c > w.complexity { - w.onFailure(lint.Failure{ + if c > r.maxComplexity { + failures = append(failures, lint.Failure{ Confidence: 1, - Category: "maintenance", + Category: lint.FailureCategoryMaintenance, Failure: fmt.Sprintf("function %s has cyclomatic complexity %d (> max enabled %d)", - funcName(fn), c, w.complexity), + funcName(fn), c, r.maxComplexity), Node: fn, }) } } - return nil + return failures +} + +// Name returns the rule name. +func (*CyclomaticRule) Name() string { + return "cyclomatic" } // funcName returns the name representation of a function or method: diff --git a/vendor/github.com/mgechev/revive/rule/datarace.go b/vendor/github.com/mgechev/revive/rule/datarace.go index 21a7a706e..a189bc3a3 100644 --- a/vendor/github.com/mgechev/revive/rule/datarace.go +++ b/vendor/github.com/mgechev/revive/rule/datarace.go @@ -11,50 +11,47 @@ import ( type DataRaceRule struct{} // Apply applies the rule to given file. -func (*DataRaceRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *DataRaceRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + isGo122 := file.Pkg.IsAtLeastGo122() var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - w := lintDataRaces{onFailure: onFailure, go122for: file.Pkg.IsAtLeastGo122()} + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok || funcDecl.Body == nil { + continue // not function declaration or empty function + } - ast.Walk(w, file.AST) + funcResults := funcDecl.Type.Results - return failures -} + // TODO: ast.Object is deprecated + returnIDs := map[*ast.Object]struct{}{} + if funcResults != nil { + returnIDs = r.extractReturnIDs(funcResults.List) + } -// Name returns the rule name. 
-func (*DataRaceRule) Name() string { - return "datarace" -} + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } -type lintDataRaces struct { - onFailure func(failure lint.Failure) - go122for bool -} + fl := &lintFunctionForDataRaces{ + onFailure: onFailure, + returnIDs: returnIDs, + rangeIDs: map[*ast.Object]struct{}{}, // TODO: ast.Object is deprecated + go122for: isGo122, + } -func (w lintDataRaces) Visit(n ast.Node) ast.Visitor { - node, ok := n.(*ast.FuncDecl) - if !ok { - return w // not function declaration - } - if node.Body == nil { - return nil // empty body + ast.Walk(fl, funcDecl.Body) } - results := node.Type.Results - - returnIDs := map[*ast.Object]struct{}{} - if results != nil { - returnIDs = w.ExtractReturnIDs(results.List) - } - fl := &lintFunctionForDataRaces{onFailure: w.onFailure, returnIDs: returnIDs, rangeIDs: map[*ast.Object]struct{}{}, go122for: w.go122for} - ast.Walk(fl, node.Body) + return failures +} - return nil +// Name returns the rule name. +func (*DataRaceRule) Name() string { + return "datarace" } -func (lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { +// TODO: ast.Object is deprecated +func (*DataRaceRule) extractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { r := map[*ast.Object]struct{}{} for _, f := range fields { for _, id := range f.Names { @@ -68,9 +65,10 @@ func (lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struc type lintFunctionForDataRaces struct { _ struct{} onFailure func(failure lint.Failure) - returnIDs map[*ast.Object]struct{} - rangeIDs map[*ast.Object]struct{} - go122for bool + returnIDs map[*ast.Object]struct{} // TODO: ast.Object is deprecated + rangeIDs map[*ast.Object]struct{} // TODO: ast.Object is deprecated + + go122for bool } func (w lintFunctionForDataRaces) Visit(node ast.Node) ast.Visitor { @@ -124,14 +122,14 @@ func (w lintFunctionForDataRaces) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 1, Node: id, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: fmt.Sprintf("datarace: range value %s is captured (by-reference) in goroutine", id.Name), }) case isReturnID: w.onFailure(lint.Failure{ Confidence: 0.8, Node: id, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: fmt.Sprintf("potential datarace: return value %s is captured (by-reference) in goroutine", id.Name), }) } diff --git a/vendor/github.com/mgechev/revive/rule/deep_exit.go b/vendor/github.com/mgechev/revive/rule/deep_exit.go index 7b3dd0f82..6f7acd305 100644 --- a/vendor/github.com/mgechev/revive/rule/deep_exit.go +++ b/vendor/github.com/mgechev/revive/rule/deep_exit.go @@ -3,6 +3,9 @@ package rule import ( "fmt" "go/ast" + "strings" + "unicode" + "unicode/utf8" "github.com/mgechev/revive/lint" ) @@ -17,20 +20,7 @@ func (*DeepExitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { failures = append(failures, failure) } - exitFunctions := map[string]map[string]bool{ - "os": {"Exit": true}, - "syscall": {"Exit": true}, - "log": { - "Fatal": true, - "Fatalf": true, - "Fatalln": true, - "Panic": true, - "Panicf": true, - "Panicln": true, - }, - } - - w := lintDeepExit{onFailure, exitFunctions, file.IsTest()} + w := &lintDeepExit{onFailure: onFailure, isTestFile: file.IsTest()} ast.Walk(w, file.AST) return failures } @@ -41,12 +31,11 @@ func (*DeepExitRule) Name() string { } type lintDeepExit struct { - onFailure func(lint.Failure) - exitFunctions map[string]map[string]bool - isTestFile bool + 
onFailure func(lint.Failure) + isTestFile bool } -func (w lintDeepExit) Visit(node ast.Node) ast.Visitor { +func (w *lintDeepExit) Visit(node ast.Node) ast.Visitor { if fd, ok := node.(*ast.FuncDecl); ok { if w.mustIgnore(fd) { return nil // skip analysis of this function @@ -75,12 +64,11 @@ func (w lintDeepExit) Visit(node ast.Node) ast.Visitor { pkg := id.Name fn := fc.Sel.Name - isACallToExitFunction := w.exitFunctions[pkg] != nil && w.exitFunctions[pkg][fn] - if isACallToExitFunction { + if isCallToExitFunction(pkg, fn) { w.onFailure(lint.Failure{ Confidence: 1, Node: ce, - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Failure: fmt.Sprintf("calls to %s.%s only in main() or init() functions", pkg, fn), }) } @@ -91,5 +79,32 @@ func (w lintDeepExit) Visit(node ast.Node) ast.Visitor { func (w *lintDeepExit) mustIgnore(fd *ast.FuncDecl) bool { fn := fd.Name.Name - return fn == "init" || fn == "main" || (w.isTestFile && fn == "TestMain") + return fn == "init" || fn == "main" || w.isTestMain(fd) || w.isTestExample(fd) +} + +func (w *lintDeepExit) isTestMain(fd *ast.FuncDecl) bool { + return w.isTestFile && fd.Name.Name == "TestMain" +} + +// isTestExample returns true if the function is a testable example function. +// See https://go.dev/blog/examples#examples-are-tests for more information. +// +// Inspired by https://github.com/golang/go/blob/go1.23.0/src/go/doc/example.go#L72-L77 +func (w *lintDeepExit) isTestExample(fd *ast.FuncDecl) bool { + if !w.isTestFile { + return false + } + name := fd.Name.Name + const prefix = "Example" + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Example" is a package level example + return len(fd.Type.Params.List) == 0 + } + r, _ := utf8.DecodeRuneInString(name[len(prefix):]) + if unicode.IsLower(r) { + return false + } + return len(fd.Type.Params.List) == 0 } diff --git a/vendor/github.com/mgechev/revive/rule/defer.go b/vendor/github.com/mgechev/revive/rule/defer.go index f7c716eb6..47771f4f8 100644 --- a/vendor/github.com/mgechev/revive/rule/defer.go +++ b/vendor/github.com/mgechev/revive/rule/defer.go @@ -3,26 +3,29 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) -// DeferRule lints unused params in functions. +// DeferRule lints gotchas in defer statements. type DeferRule struct { allow map[string]bool - - configureOnce sync.Once } -func (r *DeferRule) configure(arguments lint.Arguments) { - r.allow = r.allowFromArgs(arguments) +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *DeferRule) Configure(arguments lint.Arguments) error { + list, err := r.allowFromArgs(arguments) + if err != nil { + return err + } + r.allow = list + return nil } // Apply applies the rule to given file. 
-func (r *DeferRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *DeferRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) @@ -39,7 +42,7 @@ func (*DeferRule) Name() string { return "defer" } -func (*DeferRule) allowFromArgs(args lint.Arguments) map[string]bool { +func (*DeferRule) allowFromArgs(args lint.Arguments) (map[string]bool, error) { if len(args) < 1 { allow := map[string]bool{ "loop": true, @@ -50,24 +53,24 @@ func (*DeferRule) allowFromArgs(args lint.Arguments) map[string]bool { "immediate-recover": true, } - return allow + return allow, nil } aa, ok := args[0].([]any) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. Expecting []string, got %T", args[0], args[0])) + return nil, fmt.Errorf("invalid argument '%v' for 'defer' rule. Expecting []string, got %T", args[0], args[0]) } allow := make(map[string]bool, len(aa)) for _, subcase := range aa { sc, ok := subcase.(string) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. Expecting string, got %T", subcase, subcase)) + return nil, fmt.Errorf("invalid argument '%v' for 'defer' rule. Expecting string, got %T", subcase, subcase) } allow[sc] = true } - return allow + return allow, nil } type lintDeferRule struct { @@ -91,7 +94,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { return nil case *ast.ReturnStmt: if len(n.Results) != 0 && w.inADefer && w.inAFuncLit { - w.newFailure("return in a defer function has no effect", n, 1.0, "logic", "return") + w.newFailure("return in a defer function has no effect", n, 1.0, lint.FailureCategoryLogic, "return") } case *ast.CallExpr: isCallToRecover := isIdent(n.Fun, "recover") @@ -100,13 +103,13 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { // func fn() { recover() } // // confidence is not 1 because recover can be in a function that is deferred elsewhere - w.newFailure("recover must be called inside a deferred function", n, 0.8, "logic", "recover") + w.newFailure("recover must be called inside a deferred function", n, 0.8, lint.FailureCategoryLogic, "recover") case w.inADefer && !w.inAFuncLit && isCallToRecover: // defer helper(recover()) // // confidence is not truly 1 because this could be in a correctly-deferred func, // but it is very likely to be a misunderstanding of defer's behavior around arguments. - w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, "logic", "immediate-recover") + w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, lint.FailureCategoryLogic, "immediate-recover") } return nil // no need to analyze the arguments of the function call case *ast.DeferStmt: @@ -115,7 +118,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { // // confidence is not truly 1 because this could be in a correctly-deferred func, // but normally this doesn't suppress a panic, and even if it did it would silently discard the value. 
- w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, "logic", "immediate-recover") + w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, lint.FailureCategoryLogic, "immediate-recover") } w.visitSubtree(n.Call.Fun, true, false, false) for _, a := range n.Call.Args { @@ -128,17 +131,17 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { } if w.inALoop { - w.newFailure("prefer not to defer inside loops", n, 1.0, "bad practice", "loop") + w.newFailure("prefer not to defer inside loops", n, 1.0, lint.FailureCategoryBadPractice, "loop") } switch fn := n.Call.Fun.(type) { case *ast.CallExpr: - w.newFailure("prefer not to defer chains of function calls", fn, 1.0, "bad practice", "call-chain") + w.newFailure("prefer not to defer chains of function calls", fn, 1.0, lint.FailureCategoryBadPractice, "call-chain") case *ast.SelectorExpr: if id, ok := fn.X.(*ast.Ident); ok { isMethodCall := id != nil && id.Obj != nil && id.Obj.Kind == ast.Typ if isMethodCall { - w.newFailure("be careful when deferring calls to methods without pointer receiver", fn, 0.8, "bad practice", "method-call") + w.newFailure("be careful when deferring calls to methods without pointer receiver", fn, 0.8, lint.FailureCategoryBadPractice, "method-call") } } } @@ -160,7 +163,7 @@ func (w lintDeferRule) visitSubtree(n ast.Node, inADefer, inALoop, inAFuncLit bo ast.Walk(nw, n) } -func (w lintDeferRule) newFailure(msg string, node ast.Node, confidence float64, cat, subcase string) { +func (w lintDeferRule) newFailure(msg string, node ast.Node, confidence float64, cat lint.FailureCategory, subcase string) { if !w.allow[subcase] { return } diff --git a/vendor/github.com/mgechev/revive/rule/dot_imports.go b/vendor/github.com/mgechev/revive/rule/dot_imports.go index f6c7fbcfb..3ee4999a3 100644 --- a/vendor/github.com/mgechev/revive/rule/dot_imports.go +++ b/vendor/github.com/mgechev/revive/rule/dot_imports.go @@ -3,22 +3,17 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) -// DotImportsRule lints given else constructs. +// DotImportsRule forbids . imports. type DotImportsRule struct { allowedPackages allowPackages - - configureOnce sync.Once } // Apply applies the rule to given file. -func (r *DotImportsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure fileAst := file.AST @@ -41,30 +36,34 @@ func (*DotImportsRule) Name() string { return "dot-imports" } -func (r *DotImportsRule) configure(arguments lint.Arguments) { - r.allowedPackages = make(allowPackages) +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *DotImportsRule) Configure(arguments lint.Arguments) error { + r.allowedPackages = allowPackages{} if len(arguments) == 0 { - return + return nil } args, ok := arguments[0].(map[string]any) if !ok { - panic(fmt.Sprintf("Invalid argument to the dot-imports rule. Expecting a k,v map, got %T", arguments[0])) + return fmt.Errorf("invalid argument to the dot-imports rule. 
Expecting a k,v map, got %T", arguments[0]) } if allowedPkgArg, ok := args["allowedPackages"]; ok { pkgs, ok := allowedPkgArg.([]any) if !ok { - panic(fmt.Sprintf("Invalid argument to the dot-imports rule, []string expected. Got '%v' (%T)", allowedPkgArg, allowedPkgArg)) + return fmt.Errorf("invalid argument to the dot-imports rule, []string expected. Got '%v' (%T)", allowedPkgArg, allowedPkgArg) } for _, p := range pkgs { pkg, ok := p.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the dot-imports rule, string expected. Got '%v' (%T)", p, p)) + return fmt.Errorf("invalid argument to the dot-imports rule, string expected. Got '%v' (%T)", p, p) } r.allowedPackages.add(pkg) } } + return nil } type lintImports struct { @@ -82,7 +81,7 @@ func (w lintImports) Visit(_ ast.Node) ast.Visitor { Confidence: 1, Failure: "should not use dot imports", Node: importSpec, - Category: "imports", + Category: lint.FailureCategoryImports, }) } } diff --git a/vendor/github.com/mgechev/revive/rule/duplicated_imports.go b/vendor/github.com/mgechev/revive/rule/duplicated_imports.go index 2b177fac6..60955c427 100644 --- a/vendor/github.com/mgechev/revive/rule/duplicated_imports.go +++ b/vendor/github.com/mgechev/revive/rule/duplicated_imports.go @@ -6,7 +6,7 @@ import ( "github.com/mgechev/revive/lint" ) -// DuplicatedImportsRule lints given else constructs. +// DuplicatedImportsRule looks for packages that are imported two or more times. type DuplicatedImportsRule struct{} // Apply applies the rule to given file. @@ -22,7 +22,7 @@ func (*DuplicatedImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Fa Confidence: 1, Failure: fmt.Sprintf("Package %s already imported", path), Node: imp, - Category: "imports", + Category: lint.FailureCategoryImports, }) continue } diff --git a/vendor/github.com/mgechev/revive/rule/early_return.go b/vendor/github.com/mgechev/revive/rule/early_return.go index 62d491f27..41557b019 100644 --- a/vendor/github.com/mgechev/revive/rule/early_return.go +++ b/vendor/github.com/mgechev/revive/rule/early_return.go @@ -13,7 +13,7 @@ type EarlyReturnRule struct{} // Apply applies the rule to given file. func (e *EarlyReturnRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - return ifelse.Apply(e, file.AST, ifelse.TargetIf, args) + return ifelse.Apply(e.checkIfElse, file.AST, ifelse.TargetIf, args) } // Name returns the rule name. @@ -21,31 +21,40 @@ func (*EarlyReturnRule) Name() string { return "early-return" } -// CheckIfElse evaluates the rule against an ifelse.Chain and returns a failure message if applicable. -func (*EarlyReturnRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) string { - if !chain.Else.Deviates() { - // this rule only applies if the else-block deviates control flow - return "" +func (*EarlyReturnRule) checkIfElse(chain ifelse.Chain, args ifelse.Args) (string, bool) { + if chain.HasElse { + if !chain.Else.BranchKind.Deviates() { + // this rule only applies if the else-block deviates control flow + return "", false + } + } else if !args.AllowJump || !chain.AtBlockEnd || !chain.BlockEndKind.Deviates() || chain.If.IsShort() { + // this kind of refactor requires introducing a new indented "return", "continue" or "break" statement, + // so ignore unless we are able to outdent multiple statements in exchange. 
+ return "", false } if chain.HasPriorNonDeviating && !chain.If.IsEmpty() { // if we de-indent this block then a previous branch - // might flow into it, affecting program behaviour - return "" + // might flow into it, affecting program behavior + return "", false } - if chain.If.Deviates() { + if chain.HasElse && chain.If.Deviates() { // avoid overlapping with superfluous-else - return "" + return "", false } - if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.If.HasDecls) { + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.If.HasDecls()) { // avoid increasing variable scope - return "" + return "", false + } + + if !chain.HasElse { + return fmt.Sprintf("if c { ... } can be rewritten if !c { %v } ... to reduce nesting", chain.BlockEndKind), true } if chain.If.IsEmpty() { - return fmt.Sprintf("if c { } else { %[1]v } can be simplified to if !c { %[1]v }", chain.Else) + return fmt.Sprintf("if c { } else %[1]v can be simplified to if !c %[1]v", chain.Else), true } - return fmt.Sprintf("if c { ... } else { %[1]v } can be simplified to if !c { %[1]v } ...", chain.Else) + return fmt.Sprintf("if c { ... } else %[1]v can be simplified to if !c %[1]v ...", chain.Else), true } diff --git a/vendor/github.com/mgechev/revive/rule/empty_block.go b/vendor/github.com/mgechev/revive/rule/empty_block.go index 25a052a0e..210692c94 100644 --- a/vendor/github.com/mgechev/revive/rule/empty_block.go +++ b/vendor/github.com/mgechev/revive/rule/empty_block.go @@ -6,7 +6,7 @@ import ( "github.com/mgechev/revive/lint" ) -// EmptyBlockRule lints given else constructs. +// EmptyBlockRule warns on empty code blocks. type EmptyBlockRule struct{} // Apply applies the rule to given file. @@ -17,7 +17,7 @@ func (*EmptyBlockRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { failures = append(failures, failure) } - w := lintEmptyBlock{make(map[*ast.BlockStmt]bool), onFailure} + w := lintEmptyBlock{map[*ast.BlockStmt]bool{}, onFailure} ast.Walk(w, file.AST) return failures } @@ -55,7 +55,7 @@ func (w lintEmptyBlock) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 0.9, Node: n, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: "this block is empty, you can remove it", }) return nil // skip visiting the range subtree (it will produce a duplicated failure) @@ -65,7 +65,7 @@ func (w lintEmptyBlock) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: "this block is empty, you can remove it", }) } diff --git a/vendor/github.com/mgechev/revive/rule/empty_lines.go b/vendor/github.com/mgechev/revive/rule/empty_lines.go index 2710a8979..a2f8dc6fd 100644 --- a/vendor/github.com/mgechev/revive/rule/empty_lines.go +++ b/vendor/github.com/mgechev/revive/rule/empty_lines.go @@ -60,7 +60,7 @@ func (w lintEmptyLines) checkStart(block *ast.BlockStmt) { w.onFailure(lint.Failure{ Confidence: 1, Node: block, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "extra empty line at the start of a block", }) } @@ -79,7 +79,7 @@ func (w lintEmptyLines) checkEnd(block *ast.BlockStmt) { w.onFailure(lint.Failure{ Confidence: 1, Node: block, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "extra empty line at the end of a block", }) } diff --git a/vendor/github.com/mgechev/revive/rule/enforce_map_style.go b/vendor/github.com/mgechev/revive/rule/enforce_map_style.go index 7ddf31e35..df9793bb6 100644 --- 
a/vendor/github.com/mgechev/revive/rule/enforce_map_style.go +++ b/vendor/github.com/mgechev/revive/rule/enforce_map_style.go @@ -3,7 +3,6 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) @@ -40,39 +39,38 @@ func mapStyleFromString(s string) (enforceMapStyleType, error) { // EnforceMapStyleRule implements a rule to enforce `make(map[type]type)` over `map[type]type{}`. type EnforceMapStyleRule struct { enforceMapStyle enforceMapStyleType - - configureOnce sync.Once } -func (r *EnforceMapStyleRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *EnforceMapStyleRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.enforceMapStyle = enforceMapStyleTypeAny - return + return nil } enforceMapStyle, ok := arguments[0].(string) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-map-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + return fmt.Errorf("invalid argument '%v' for 'enforce-map-style' rule. Expecting string, got %T", arguments[0], arguments[0]) } var err error r.enforceMapStyle, err = mapStyleFromString(enforceMapStyle) if err != nil { - panic(fmt.Sprintf("Invalid argument to the enforce-map-style rule: %v", err)) + return fmt.Errorf("invalid argument to the enforce-map-style rule: %w", err) } + + return nil } // Apply applies the rule to given file. -func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *EnforceMapStyleRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { if r.enforceMapStyle == enforceMapStyleTypeAny { // this linter is not configured return nil } - var failures []lint.Failure - astFile := file.AST ast.Inspect(astFile, func(n ast.Node) bool { switch v := n.(type) { @@ -93,7 +91,7 @@ func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) [ failures = append(failures, lint.Failure{ Confidence: 1, Node: v, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "use make(map[type]type) instead of map[type]type{}", }) case *ast.CallExpr: @@ -121,7 +119,7 @@ func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) [ failures = append(failures, lint.Failure{ Confidence: 1, Node: v.Args[0], - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "use map[type]type{} instead of make(map[type]type)", }) } diff --git a/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go b/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go index 3f9712aef..ab466f5f8 100644 --- a/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go +++ b/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go @@ -3,7 +3,6 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) @@ -16,14 +15,14 @@ const ( enforceRepeatedArgTypeStyleTypeFull enforceRepeatedArgTypeStyleType = "full" ) -func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { +func repeatedArgTypeStyleFromString(s string) (enforceRepeatedArgTypeStyleType, error) { switch s { case string(enforceRepeatedArgTypeStyleTypeAny), "": - return enforceRepeatedArgTypeStyleTypeAny + return enforceRepeatedArgTypeStyleTypeAny, nil case string(enforceRepeatedArgTypeStyleTypeShort): - return 
enforceRepeatedArgTypeStyleTypeShort + return enforceRepeatedArgTypeStyleTypeShort, nil case string(enforceRepeatedArgTypeStyleTypeFull): - return enforceRepeatedArgTypeStyleTypeFull + return enforceRepeatedArgTypeStyleTypeFull, nil default: err := fmt.Errorf( "invalid repeated arg type style: %s (expecting one of %v)", @@ -35,7 +34,7 @@ func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { }, ) - panic(fmt.Sprintf("Invalid argument to the enforce-repeated-arg-type-style rule: %v", err)) + return "", fmt.Errorf("invalid argument to the enforce-repeated-arg-type-style rule: %w", err) } } @@ -43,50 +42,66 @@ func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { type EnforceRepeatedArgTypeStyleRule struct { funcArgStyle enforceRepeatedArgTypeStyleType funcRetValStyle enforceRepeatedArgTypeStyleType - - configureOnce sync.Once } -func (r *EnforceRepeatedArgTypeStyleRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *EnforceRepeatedArgTypeStyleRule) Configure(arguments lint.Arguments) error { r.funcArgStyle = enforceRepeatedArgTypeStyleTypeAny r.funcRetValStyle = enforceRepeatedArgTypeStyleTypeAny if len(arguments) == 0 { - return + return nil } switch funcArgStyle := arguments[0].(type) { case string: - r.funcArgStyle = repeatedArgTypeStyleFromString(funcArgStyle) - r.funcRetValStyle = repeatedArgTypeStyleFromString(funcArgStyle) + argstyle, err := repeatedArgTypeStyleFromString(funcArgStyle) + if err != nil { + return err + } + r.funcArgStyle = argstyle + valstyle, err := repeatedArgTypeStyleFromString(funcArgStyle) + if err != nil { + return err + } + r.funcRetValStyle = valstyle case map[string]any: // expecting map[string]string for k, v := range funcArgStyle { switch k { case "funcArgStyle": val, ok := v.(string) if !ok { - panic(fmt.Sprintf("Invalid map value type for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v)) + return fmt.Errorf("invalid map value type for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v) + } + valstyle, err := repeatedArgTypeStyleFromString(val) + if err != nil { + return err } - r.funcArgStyle = repeatedArgTypeStyleFromString(val) + r.funcArgStyle = valstyle case "funcRetValStyle": val, ok := v.(string) if !ok { - panic(fmt.Sprintf("Invalid map value '%v' for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v, v)) + return fmt.Errorf("invalid map value '%v' for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v, v) + } + argstyle, err := repeatedArgTypeStyleFromString(val) + if err != nil { + return err } - r.funcRetValStyle = repeatedArgTypeStyleFromString(val) + r.funcRetValStyle = argstyle default: - panic(fmt.Sprintf("Invalid map key for 'enforce-repeated-arg-type-style' rule. Expecting 'funcArgStyle' or 'funcRetValStyle', got %v", k)) + return fmt.Errorf("invalid map key for 'enforce-repeated-arg-type-style' rule. Expecting 'funcArgStyle' or 'funcRetValStyle', got %v", k) } } default: - panic(fmt.Sprintf("Invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + return fmt.Errorf("invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0]) } + return nil } // Apply applies the rule to a given file. 
-func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeAny && r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeAny { // This linter is not configured, return no failures. return nil @@ -98,24 +113,23 @@ func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint. ast.Inspect(astFile, func(n ast.Node) bool { switch fn := n.(type) { case *ast.FuncDecl: - if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeFull { + switch r.funcArgStyle { + case enforceRepeatedArgTypeStyleTypeFull: if fn.Type.Params != nil { for _, field := range fn.Type.Params.List { if len(field.Names) > 1 { failures = append(failures, lint.Failure{ Confidence: 1, Node: field, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "argument types should not be omitted", }) } } } - } - - if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeShort { - var prevType ast.Expr + case enforceRepeatedArgTypeStyleTypeShort: if fn.Type.Params != nil { + var prevType ast.Expr for _, field := range fn.Type.Params.List { prevTypeStr := gofmt(prevType) currentTypeStr := gofmt(field.Type) @@ -123,7 +137,7 @@ func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint. failures = append(failures, lint.Failure{ Confidence: 1, Node: prevType, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: fmt.Sprintf("repeated argument type %q can be omitted", prevTypeStr), }) } @@ -132,24 +146,23 @@ func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint. } } - if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeFull { + switch r.funcRetValStyle { + case enforceRepeatedArgTypeStyleTypeFull: if fn.Type.Results != nil { for _, field := range fn.Type.Results.List { if len(field.Names) > 1 { failures = append(failures, lint.Failure{ Confidence: 1, Node: field, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: "return types should not be omitted", }) } } } - } - - if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeShort { - var prevType ast.Expr + case enforceRepeatedArgTypeStyleTypeShort: if fn.Type.Results != nil { + var prevType ast.Expr for _, field := range fn.Type.Results.List { prevTypeStr := gofmt(prevType) currentTypeStr := gofmt(field.Type) @@ -157,7 +170,7 @@ func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint. failures = append(failures, lint.Failure{ Confidence: 1, Node: prevType, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: fmt.Sprintf("repeated return type %q can be omitted", prevTypeStr), }) } diff --git a/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go b/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go index 7170379d9..ab503094f 100644 --- a/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go +++ b/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go @@ -3,7 +3,6 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) @@ -44,32 +43,32 @@ func sliceStyleFromString(s string) (enforceSliceStyleType, error) { // EnforceSliceStyleRule implements a rule to enforce `make([]type)` over `[]type{}`. 
type EnforceSliceStyleRule struct { enforceSliceStyle enforceSliceStyleType - - configureOnce sync.Once } -func (r *EnforceSliceStyleRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *EnforceSliceStyleRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.enforceSliceStyle = enforceSliceStyleTypeAny - return + return nil } enforceSliceStyle, ok := arguments[0].(string) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-slice-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + return fmt.Errorf("invalid argument '%v' for 'enforce-slice-style' rule. Expecting string, got %T", arguments[0], arguments[0]) } var err error r.enforceSliceStyle, err = sliceStyleFromString(enforceSliceStyle) if err != nil { - panic(fmt.Sprintf("Invalid argument to the enforce-slice-style rule: %v", err)) + return fmt.Errorf("invalid argument to the enforce-slice-style rule: %w", err) } + return nil } // Apply applies the rule to given file. -func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *EnforceSliceStyleRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { if r.enforceSliceStyle == enforceSliceStyleTypeAny { // this linter is not configured return nil @@ -106,7 +105,7 @@ func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) failures = append(failures, lint.Failure{ Confidence: 1, Node: v, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: failureMessage, }) case *ast.CallExpr: @@ -166,7 +165,7 @@ func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) failures = append(failures, lint.Failure{ Confidence: 1, Node: v.Args[0], - Category: "style", + Category: lint.FailureCategoryStyle, Failure: failureMessage, }) } diff --git a/vendor/github.com/mgechev/revive/rule/error_naming.go b/vendor/github.com/mgechev/revive/rule/error_naming.go index a4f24f3f0..5ae490813 100644 --- a/vendor/github.com/mgechev/revive/rule/error_naming.go +++ b/vendor/github.com/mgechev/revive/rule/error_naming.go @@ -9,7 +9,7 @@ import ( "github.com/mgechev/revive/lint" ) -// ErrorNamingRule lints given else constructs. +// ErrorNamingRule lints naming of error variables. type ErrorNamingRule struct{} // Apply applies the rule to given file. @@ -69,7 +69,7 @@ func (w lintErrors) Visit(_ ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Node: id, Confidence: 0.9, - Category: "naming", + Category: lint.FailureCategoryNaming, Failure: fmt.Sprintf("error var %s should have name of the form %sFoo", id.Name, prefix), }) } diff --git a/vendor/github.com/mgechev/revive/rule/error_return.go b/vendor/github.com/mgechev/revive/rule/error_return.go index a724e001c..19f10a661 100644 --- a/vendor/github.com/mgechev/revive/rule/error_return.go +++ b/vendor/github.com/mgechev/revive/rule/error_return.go @@ -6,23 +6,41 @@ import ( "github.com/mgechev/revive/lint" ) -// ErrorReturnRule lints given else constructs. +// ErrorReturnRule ensures that the error return parameter is the last parameter. type ErrorReturnRule struct{} // Apply applies the rule to given file. 
func (*ErrorReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - fileAst := file.AST - walker := lintErrorReturn{ - file: file, - fileAst: fileAst, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + isFunctionWithMoreThanOneResult := ok && funcDecl.Type.Results != nil && len(funcDecl.Type.Results.List) > 1 + if !isFunctionWithMoreThanOneResult { + continue + } + + funcResults := funcDecl.Type.Results.List + isLastResultError := isIdent(funcResults[len(funcResults)-1].Type, "error") + if isLastResultError { + continue + } - ast.Walk(walker, fileAst) + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range funcResults[:len(funcResults)-1] { + if isIdent(r.Type, "error") { + failures = append(failures, lint.Failure{ + Category: lint.FailureCategoryStyle, + Confidence: 0.9, + Node: funcDecl, + Failure: "error should be the last type when returning multiple items", + }) + + break // only flag one + } + } + } return failures } @@ -31,37 +49,3 @@ func (*ErrorReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure func (*ErrorReturnRule) Name() string { return "error-return" } - -type lintErrorReturn struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) -} - -func (w lintErrorReturn) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Type.Results == nil { - return w - } - ret := fn.Type.Results.List - if len(ret) <= 1 { - return w - } - if isIdent(ret[len(ret)-1].Type, "error") { - return nil - } - // An error return parameter should be the last parameter. - // Flag any error parameters found before the last. - for _, r := range ret[:len(ret)-1] { - if isIdent(r.Type, "error") { - w.onFailure(lint.Failure{ - Category: "arg-order", - Confidence: 0.9, - Node: fn, - Failure: "error should be the last type when returning multiple items", - }) - break // only flag one - } - } - return w -} diff --git a/vendor/github.com/mgechev/revive/rule/error_strings.go b/vendor/github.com/mgechev/revive/rule/error_strings.go index 97a0f4d06..839a613aa 100644 --- a/vendor/github.com/mgechev/revive/rule/error_strings.go +++ b/vendor/github.com/mgechev/revive/rule/error_strings.go @@ -1,25 +1,26 @@ package rule import ( + "fmt" "go/ast" "go/token" "strconv" "strings" - "sync" "unicode" "unicode/utf8" "github.com/mgechev/revive/lint" ) -// ErrorStringsRule lints given else constructs. +// ErrorStringsRule lints error strings. type ErrorStringsRule struct { errorFunctions map[string]map[string]struct{} - - configureOnce sync.Once } -func (r *ErrorStringsRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ErrorStringsRule) Configure(arguments lint.Arguments) error { r.errorFunctions = map[string]map[string]struct{}{ "fmt": { "Errorf": {}, @@ -46,16 +47,15 @@ func (r *ErrorStringsRule) configure(arguments lint.Arguments) { } } if len(invalidCustomFunctions) != 0 { - panic("found invalid custom function: " + strings.Join(invalidCustomFunctions, ",")) + return fmt.Errorf("found invalid custom function: %s", strings.Join(invalidCustomFunctions, ",")) } + return nil } // Apply applies the rule to given file. 
-func (r *ErrorStringsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { +func (r *ErrorStringsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - r.configureOnce.Do(func() { r.configure(arguments) }) - fileAst := file.AST walker := lintErrorStrings{ file: file, @@ -115,7 +115,7 @@ func (w lintErrorStrings) Visit(n ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Node: str, Confidence: conf, - Category: "errors", + Category: lint.FailureCategoryErrors, Failure: "error strings should not be capitalized or end with punctuation or a newline", }) return w diff --git a/vendor/github.com/mgechev/revive/rule/errorf.go b/vendor/github.com/mgechev/revive/rule/errorf.go index 1588a745d..cd56fe29c 100644 --- a/vendor/github.com/mgechev/revive/rule/errorf.go +++ b/vendor/github.com/mgechev/revive/rule/errorf.go @@ -9,7 +9,7 @@ import ( "github.com/mgechev/revive/lint" ) -// ErrorfRule lints given else constructs. +// ErrorfRule suggests using `fmt.Errorf` instead of `errors.New(fmt.Sprintf())`. type ErrorfRule struct{} // Apply applies the rule to given file. @@ -69,7 +69,7 @@ func (w lintErrorf) Visit(n ast.Node) ast.Visitor { } failure := lint.Failure{ - Category: "errors", + Category: lint.FailureCategoryErrors, Node: n, Confidence: 1, Failure: fmt.Sprintf("should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", w.file.Render(se), errorfPrefix), diff --git a/vendor/github.com/mgechev/revive/rule/exported.go b/vendor/github.com/mgechev/revive/rule/exported.go index 7ee27b309..7d59c4de5 100644 --- a/vendor/github.com/mgechev/revive/rule/exported.go +++ b/vendor/github.com/mgechev/revive/rule/exported.go @@ -5,7 +5,6 @@ import ( "go/ast" "go/token" "strings" - "sync" "unicode" "unicode/utf8" @@ -25,9 +24,11 @@ type disabledChecks struct { Var bool } -const checkNamePrivateReceivers = "privateReceivers" -const checkNamePublicInterfaces = "publicInterfaces" -const checkNameStuttering = "stuttering" +const ( + checkNamePrivateReceivers = "privateReceivers" + checkNamePublicInterfaces = "publicInterfaces" + checkNameStuttering = "stuttering" +) // isDisabled returns true if the given check is disabled, false otherwise func (dc *disabledChecks) isDisabled(checkName string) bool { @@ -53,15 +54,25 @@ func (dc *disabledChecks) isDisabled(checkName string) bool { } } -// ExportedRule lints given else constructs. +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, + "Unwrap": true, +} + +// ExportedRule lints naming and commenting conventions on exported symbols. type ExportedRule struct { stuttersMsg string disabledChecks disabledChecks - - configureOnce sync.Once } -func (r *ExportedRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. 
+func (r *ExportedRule) Configure(arguments lint.Arguments) error { r.disabledChecks = disabledChecks{PrivateReceivers: true, PublicInterfaces: true} r.stuttersMsg = "stutters" for _, flag := range arguments { @@ -87,18 +98,18 @@ func (r *ExportedRule) configure(arguments lint.Arguments) { case "disableChecksOnVariables": r.disabledChecks.Var = true default: - panic(fmt.Sprintf("Unknown configuration flag %s for %s rule", flag, r.Name())) + return fmt.Errorf("unknown configuration flag %s for %s rule", flag, r.Name()) } default: - panic(fmt.Sprintf("Invalid argument for the %s rule: expecting a string, got %T", r.Name(), flag)) + return fmt.Errorf("invalid argument for the %s rule: expecting a string, got %T", r.Name(), flag) } } + + return nil } // Apply applies the rule to given file. -func (r *ExportedRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *ExportedRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure if file.IsTest() { return failures @@ -112,7 +123,7 @@ func (r *ExportedRule) Apply(file *lint.File, args lint.Arguments) []lint.Failur onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, - genDeclMissingComments: make(map[*ast.GenDecl]bool), + genDeclMissingComments: map[*ast.GenDecl]bool{}, stuttersMsg: r.stuttersMsg, disabledChecks: r.disabledChecks, } @@ -171,11 +182,11 @@ func (w *lintExported) lintFuncDoc(fn *ast.FuncDecl) { return } - if fn.Doc == nil { + if !hasTextComment(fn.Doc) { w.onFailure(lint.Failure{ Node: fn, Confidence: 1, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf("exported %s %s should have comment or be unexported", kind, name), }) return @@ -187,7 +198,7 @@ func (w *lintExported) lintFuncDoc(fn *ast.FuncDecl) { w.onFailure(lint.Failure{ Node: fn.Doc, Confidence: 0.8, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, name, prefix), }) } @@ -221,7 +232,7 @@ func (w *lintExported) checkStutter(id *ast.Ident, thing string) { w.onFailure(lint.Failure{ Node: id, Confidence: 0.8, - Category: "naming", + Category: lint.FailureCategoryNaming, Failure: fmt.Sprintf("%s name will be used as %s.%s by other packages, and that %s; consider calling this %s", thing, pkg, name, w.stuttersMsg, rem), }) } @@ -236,11 +247,11 @@ func (w *lintExported) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { return } - if doc == nil { + if !hasTextComment(doc) { w.onFailure(lint.Failure{ Node: t, Confidence: 1, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf("exported type %v should have comment or be unexported", t.Name), }) return @@ -252,8 +263,8 @@ func (w *lintExported) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { if t.Name.Name == a { continue } - if strings.HasPrefix(s, a+" ") { - s = s[len(a)+1:] + var found bool + if s, found = strings.CutPrefix(s, a+" "); found { break } } @@ -267,7 +278,7 @@ func (w *lintExported) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { w.onFailure(lint.Failure{ Node: doc, Confidence: 1, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf(`comment on exported type %v should be of the form "%s..." 
(with optional leading article)`, t.Name, expectedPrefix), }) } @@ -287,7 +298,7 @@ func (w *lintExported) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genD for _, n := range vs.Names[1:] { if ast.IsExported(n.Name) { w.onFailure(lint.Failure{ - Category: "comments", + Category: lint.FailureCategoryComments, Confidence: 1, Failure: fmt.Sprintf("exported %s %s should have its own declaration", kind, n.Name), Node: vs, @@ -303,7 +314,7 @@ func (w *lintExported) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genD return } - if vs.Doc == nil && gd.Doc == nil { + if !hasTextComment(vs.Doc) && !hasTextComment(gd.Doc) { if genDeclMissingComments[gd] { return } @@ -314,23 +325,23 @@ func (w *lintExported) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genD w.onFailure(lint.Failure{ Confidence: 1, Node: vs, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf("exported %s %s should have comment%s or be unexported", kind, name, block), }) genDeclMissingComments[gd] = true return } // If this GenDecl has parens and a comment, we don't check its comment form. - if gd.Doc != nil && gd.Lparen.IsValid() { + if hasTextComment(gd.Doc) && gd.Lparen.IsValid() { return } // The relevant text to check will be on either vs.Doc or gd.Doc. // Use vs.Doc preferentially. var doc *ast.CommentGroup switch { - case vs.Doc != nil: + case hasTextComment(vs.Doc): doc = vs.Doc - case vs.Comment != nil && gd.Doc == nil: + case hasTextComment(vs.Comment) && !hasTextComment(gd.Doc): doc = vs.Comment default: doc = gd.Doc @@ -342,12 +353,25 @@ func (w *lintExported) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genD w.onFailure(lint.Failure{ Confidence: 1, Node: doc, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, name, prefix), }) } } +// hasTextComment returns true if the comment contains a text comment +// e.g. //go:embed foo.txt a directive comment, not a text comment +// e.g. 
//nolint:whatever is a directive comment, not a text comment +func hasTextComment(comment *ast.CommentGroup) bool { + if comment == nil { + return false + } + + // a comment could be directive and not a text comment + text := comment.Text() + return text != "" +} + // normalizeText is a helper function that normalizes comment strings by: // * removing one leading space // @@ -377,7 +401,7 @@ func (w *lintExported) Visit(n ast.Node) ast.Visitor { case *ast.TypeSpec: // inside a GenDecl, which usually has the doc doc := v.Doc - if doc == nil { + if !hasTextComment(doc) { doc = w.lastGen.Doc } w.lintTypeDoc(v, doc) @@ -413,11 +437,11 @@ func (w *lintExported) lintInterfaceMethod(typeName string, m *ast.Field) { return } name := m.Names[0].Name - if m.Doc == nil { + if !hasTextComment(m.Doc) { w.onFailure(lint.Failure{ Node: m, Confidence: 1, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf("public interface method %s.%s should be commented", typeName, name), }) return @@ -428,7 +452,7 @@ func (w *lintExported) lintInterfaceMethod(typeName string, m *ast.Field) { w.onFailure(lint.Failure{ Node: m.Doc, Confidence: 0.8, - Category: "comments", + Category: lint.FailureCategoryComments, Failure: fmt.Sprintf(`comment on exported interface method %s.%s should be of the form "%s..."`, typeName, name, expectedPrefix), }) } diff --git a/vendor/github.com/mgechev/revive/rule/file_header.go b/vendor/github.com/mgechev/revive/rule/file_header.go index 52513d8e8..53d7ea9d0 100644 --- a/vendor/github.com/mgechev/revive/rule/file_header.go +++ b/vendor/github.com/mgechev/revive/rule/file_header.go @@ -3,16 +3,13 @@ package rule import ( "fmt" "regexp" - "sync" "github.com/mgechev/revive/lint" ) -// FileHeaderRule lints given else constructs. +// FileHeaderRule lints the header that each file should have. type FileHeaderRule struct { header string - - configureOnce sync.Once } var ( @@ -20,22 +17,24 @@ var ( singleRegexp = regexp.MustCompile("^//") ) -func (r *FileHeaderRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *FileHeaderRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { - return + return nil } var ok bool r.header, ok = arguments[0].(string) if !ok { - panic(fmt.Sprintf("invalid argument for \"file-header\" rule: argument should be a string, got %T", arguments[0])) + return fmt.Errorf(`invalid argument for "file-header" rule: argument should be a string, got %T`, arguments[0]) } + return nil } // Apply applies the rule to given file. 
-func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *FileHeaderRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { if r.header == "" { return nil } @@ -69,7 +68,7 @@ func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint regex, err := regexp.Compile(r.header) if err != nil { - panic(err.Error()) + return newInternalFailureError(err) } if !regex.MatchString(comment) { diff --git a/vendor/github.com/mgechev/revive/rule/file_length_limit.go b/vendor/github.com/mgechev/revive/rule/file_length_limit.go index 0fe075c56..8a3b37126 100644 --- a/vendor/github.com/mgechev/revive/rule/file_length_limit.go +++ b/vendor/github.com/mgechev/revive/rule/file_length_limit.go @@ -7,7 +7,6 @@ import ( "go/ast" "go/token" "strings" - "sync" "github.com/mgechev/revive/lint" ) @@ -20,14 +19,10 @@ type FileLengthLimitRule struct { skipComments bool // skipBlankLines indicates whether to skip blank lines when counting lines. skipBlankLines bool - - configureOnce sync.Once } // Apply applies the rule to given file. -func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *FileLengthLimitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { if r.max <= 0 { // when max is negative or 0 the rule is disabled return nil @@ -44,7 +39,7 @@ func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) [ } if err := scanner.Err(); err != nil { - panic(err.Error()) + return newInternalFailureError(err) } lines := all @@ -62,7 +57,7 @@ func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) [ return []lint.Failure{ { - Category: "code-style", + Category: lint.FailureCategoryCodeStyle, Confidence: 1, Position: lint.FailurePosition{ Start: token.Position{ @@ -75,37 +70,41 @@ func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) [ } } -func (r *FileLengthLimitRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *FileLengthLimitRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { - return // use default + return nil // use default } argKV, ok := arguments[0].(map[string]any) if !ok { - panic(fmt.Sprintf(`invalid argument to the "file-length-limit" rule. Expecting a k,v map, got %T`, arguments[0])) + return fmt.Errorf(`invalid argument to the "file-length-limit" rule. 
Expecting a k,v map, got %T`, arguments[0]) } for k, v := range argKV { switch k { case "max": maxLines, ok := v.(int64) if !ok || maxLines < 0 { - panic(fmt.Sprintf(`invalid configuration value for max lines in "file-length-limit" rule; need positive int64 but got %T`, arguments[0])) + return fmt.Errorf(`invalid configuration value for max lines in "file-length-limit" rule; need positive int64 but got %T`, arguments[0]) } r.max = int(maxLines) case "skipComments": skipComments, ok := v.(bool) if !ok { - panic(fmt.Sprintf(`invalid configuration value for skip comments in "file-length-limit" rule; need bool but got %T`, arguments[1])) + return fmt.Errorf(`invalid configuration value for skip comments in "file-length-limit" rule; need bool but got %T`, arguments[1]) } r.skipComments = skipComments case "skipBlankLines": skipBlankLines, ok := v.(bool) if !ok { - panic(fmt.Sprintf(`invalid configuration value for skip blank lines in "file-length-limit" rule; need bool but got %T`, arguments[2])) + return fmt.Errorf(`invalid configuration value for skip blank lines in "file-length-limit" rule; need bool but got %T`, arguments[2]) } r.skipBlankLines = skipBlankLines } } + return nil } // Name returns the rule name. diff --git a/vendor/github.com/mgechev/revive/rule/filename_format.go b/vendor/github.com/mgechev/revive/rule/filename_format.go index 9d8047829..6d4905f18 100644 --- a/vendor/github.com/mgechev/revive/rule/filename_format.go +++ b/vendor/github.com/mgechev/revive/rule/filename_format.go @@ -4,7 +4,6 @@ import ( "fmt" "path/filepath" "regexp" - "sync" "unicode" "github.com/mgechev/revive/lint" @@ -13,14 +12,10 @@ import ( // FilenameFormatRule lints source filenames according to a set of regular expressions given as arguments type FilenameFormatRule struct { format *regexp.Regexp - - configureOnce sync.Once } // Apply applies the rule to the given file. -func (r *FilenameFormatRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *FilenameFormatRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { filename := filepath.Base(file.Name) if r.format.MatchString(filename) { return nil @@ -35,7 +30,7 @@ func (r *FilenameFormatRule) Apply(file *lint.File, arguments lint.Arguments) [] }} } -func (r *FilenameFormatRule) getMsgForNonASCIIChars(str string) string { +func (*FilenameFormatRule) getMsgForNonASCIIChars(str string) string { result := "" for _, c := range str { if c <= unicode.MaxASCII { @@ -53,29 +48,34 @@ func (*FilenameFormatRule) Name() string { return "filename-format" } -var defaultFormat = regexp.MustCompile("^[_A-Za-z0-9][_A-Za-z0-9-]*.go$") +var defaultFormat = regexp.MustCompile(`^[_A-Za-z0-9][_A-Za-z0-9-]*\.go$`) -func (r *FilenameFormatRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. 
+func (r *FilenameFormatRule) Configure(arguments lint.Arguments) error { argsCount := len(arguments) if argsCount == 0 { r.format = defaultFormat - return + return nil } if argsCount > 1 { - panic(fmt.Sprintf("rule %q expects only one argument, got %d %v", r.Name(), argsCount, arguments)) + return fmt.Errorf("rule %q expects only one argument, got %d %v", r.Name(), argsCount, arguments) } arg := arguments[0] str, ok := arg.(string) if !ok { - panic(fmt.Sprintf("rule %q expects a string argument, got %v of type %T", r.Name(), arg, arg)) + return fmt.Errorf("rule %q expects a string argument, got %v of type %T", r.Name(), arg, arg) } format, err := regexp.Compile(str) if err != nil { - panic(fmt.Sprintf("rule %q expects a valid regexp argument, got %v for %s", r.Name(), err, arg)) + return fmt.Errorf("rule %q expects a valid regexp argument, got error for %s: %w", r.Name(), str, err) } r.format = format + + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/flag_param.go b/vendor/github.com/mgechev/revive/rule/flag_param.go index f9bfb712c..2f69503ca 100644 --- a/vendor/github.com/mgechev/revive/rule/flag_param.go +++ b/vendor/github.com/mgechev/revive/rule/flag_param.go @@ -7,64 +7,54 @@ import ( "github.com/mgechev/revive/lint" ) -// FlagParamRule lints given else constructs. +// FlagParamRule warns on boolean parameters that create a control coupling. type FlagParamRule struct{} // Apply applies the rule to given file. func (*FlagParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - w := lintFlagParamRule{onFailure: onFailure} - ast.Walk(w, file.AST) - return failures -} - -// Name returns the rule name. -func (*FlagParamRule) Name() string { - return "flag-parameter" -} - -type lintFlagParamRule struct { - onFailure func(lint.Failure) -} - -func (w lintFlagParamRule) Visit(node ast.Node) ast.Visitor { - fd, ok := node.(*ast.FuncDecl) - if !ok { - return w - } - - if fd.Body == nil { - return nil // skip whole function declaration - } + for _, decl := range file.AST.Decls { + fd, ok := decl.(*ast.FuncDecl) + isFuncWithNonEmptyBody := ok && fd.Body != nil + if !isFuncWithNonEmptyBody { + continue + } - for _, p := range fd.Type.Params.List { - t := p.Type + boolParams := map[string]struct{}{} + for _, param := range fd.Type.Params.List { + if !isIdent(param.Type, "bool") { + continue + } - id, ok := t.(*ast.Ident) - if !ok { - continue + for _, paramIdent := range param.Names { + boolParams[paramIdent.Name] = struct{}{} + } } - if id.Name != "bool" { + if len(boolParams) == 0 { continue } - cv := conditionVisitor{p.Names, fd, w} + cv := conditionVisitor{boolParams, fd, onFailure} ast.Walk(cv, fd.Body) } - return w + return failures +} + +// Name returns the rule name. 
+func (*FlagParamRule) Name() string { + return "flag-parameter" } type conditionVisitor struct { - ids []*ast.Ident - fd *ast.FuncDecl - linter lintFlagParamRule + idents map[string]struct{} + fd *ast.FuncDecl + onFailure func(lint.Failure) } func (w conditionVisitor) Visit(node ast.Node) ast.Visitor { @@ -73,31 +63,30 @@ func (w conditionVisitor) Visit(node ast.Node) ast.Visitor { return w } - fselect := func(n ast.Node) bool { + findUsesOfIdents := func(n ast.Node) bool { ident, ok := n.(*ast.Ident) if !ok { return false } - for _, id := range w.ids { - if ident.Name == id.Name { - return true - } + _, ok = w.idents[ident.Name] + if !ok { + return false } - return false + return w.idents[ident.Name] == struct{}{} } - uses := pick(ifStmt.Cond, fselect) + uses := pick(ifStmt.Cond, findUsesOfIdents) if len(uses) < 1 { return w } - w.linter.onFailure(lint.Failure{ + w.onFailure(lint.Failure{ Confidence: 1, Node: w.fd.Type.Params, - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Failure: fmt.Sprintf("parameter '%s' seems to be a control flag, avoid control coupling", uses[0]), }) diff --git a/vendor/github.com/mgechev/revive/rule/function_length.go b/vendor/github.com/mgechev/revive/rule/function_length.go index c58cd4c0f..53cb6827c 100644 --- a/vendor/github.com/mgechev/revive/rule/function_length.go +++ b/vendor/github.com/mgechev/revive/rule/function_length.go @@ -4,7 +4,6 @@ import ( "fmt" "go/ast" "reflect" - "sync" "github.com/mgechev/revive/lint" ) @@ -13,32 +12,58 @@ import ( type FunctionLength struct { maxStmt int maxLines int - - configureOnce sync.Once } -func (r *FunctionLength) configure(arguments lint.Arguments) { - maxStmt, maxLines := r.parseArguments(arguments) +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *FunctionLength) Configure(arguments lint.Arguments) error { + maxStmt, maxLines, err := r.parseArguments(arguments) + if err != nil { + return err + } r.maxStmt = int(maxStmt) r.maxLines = int(maxLines) + return nil } // Apply applies the rule to given file. 
-func (r *FunctionLength) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *FunctionLength) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } - walker := lintFuncLength{ - file: file, - maxStmt: r.maxStmt, - maxLines: r.maxLines, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } + body := funcDecl.Body + emptyBody := body == nil || len(body.List) == 0 + if emptyBody { + return nil + } - ast.Walk(walker, file.AST) + if r.maxStmt > 0 { + stmtCount := r.countStmts(body.List) + if stmtCount > r.maxStmt { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of statements per function exceeded; max %d but got %d", r.maxStmt, stmtCount), + Node: funcDecl, + }) + } + } + + if r.maxLines > 0 { + lineCount := r.countLines(body, file) + if lineCount > r.maxLines { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of lines per function exceeded; max %d but got %d", r.maxLines, lineCount), + Node: funcDecl, + }) + } + } + } return failures } @@ -48,111 +73,69 @@ func (*FunctionLength) Name() string { return "function-length" } -const defaultFuncStmtsLimit = 50 -const defaultFuncLinesLimit = 75 +const ( + defaultFuncStmtsLimit = 50 + defaultFuncLinesLimit = 75 +) -func (*FunctionLength) parseArguments(arguments lint.Arguments) (maxStmt, maxLines int64) { +func (*FunctionLength) parseArguments(arguments lint.Arguments) (maxStmt, maxLines int64, err error) { if len(arguments) == 0 { - return defaultFuncStmtsLimit, defaultFuncLinesLimit + return defaultFuncStmtsLimit, defaultFuncLinesLimit, nil } const minArguments = 2 if len(arguments) != minArguments { - panic(fmt.Sprintf(`invalid configuration for "function-length" rule, expected %d arguments but got %d`, minArguments, len(arguments))) + return 0, 0, fmt.Errorf(`invalid configuration for "function-length" rule, expected %d arguments but got %d`, minArguments, len(arguments)) } maxStmt, maxStmtOk := arguments[0].(int64) if !maxStmtOk { - panic(fmt.Sprintf(`invalid configuration value for max statements in "function-length" rule; need int64 but got %T`, arguments[0])) + return 0, 0, fmt.Errorf(`invalid configuration value for max statements in "function-length" rule; need int64 but got %T`, arguments[0]) } if maxStmt < 0 { - panic(fmt.Sprintf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxStmt)) + return 0, 0, fmt.Errorf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxStmt) } maxLines, maxLinesOk := arguments[1].(int64) if !maxLinesOk { - panic(fmt.Sprintf(`invalid configuration value for max lines in "function-length" rule; need int64 but got %T`, arguments[1])) + return 0, 0, fmt.Errorf(`invalid configuration value for max lines in "function-length" rule; need int64 but got %T`, arguments[1]) } if maxLines < 0 { - panic(fmt.Sprintf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxLines)) - } - - return maxStmt, maxLines -} - -type lintFuncLength struct { - file *lint.File - maxStmt int - maxLines int - onFailure func(lint.Failure) -} - -func (w lintFuncLength) Visit(n ast.Node) ast.Visitor { - node, ok := n.(*ast.FuncDecl) - if !ok { - 
return w - } - - body := node.Body - emptyBody := body == nil || len(node.Body.List) == 0 - if emptyBody { - return nil + return 0, 0, fmt.Errorf(`the configuration value for max statements in "function-length" rule cannot be negative, got %d`, maxLines) } - if w.maxStmt > 0 { - stmtCount := w.countStmts(node.Body.List) - if stmtCount > w.maxStmt { - w.onFailure(lint.Failure{ - Confidence: 1, - Failure: fmt.Sprintf("maximum number of statements per function exceeded; max %d but got %d", w.maxStmt, stmtCount), - Node: node, - }) - } - } - - if w.maxLines > 0 { - lineCount := w.countLines(node.Body) - if lineCount > w.maxLines { - w.onFailure(lint.Failure{ - Confidence: 1, - Failure: fmt.Sprintf("maximum number of lines per function exceeded; max %d but got %d", w.maxLines, lineCount), - Node: node, - }) - } - } - - return nil + return maxStmt, maxLines, nil } -func (w lintFuncLength) countLines(b *ast.BlockStmt) int { - return w.file.ToPosition(b.End()).Line - w.file.ToPosition(b.Pos()).Line - 1 +func (*FunctionLength) countLines(b *ast.BlockStmt, file *lint.File) int { + return file.ToPosition(b.End()).Line - file.ToPosition(b.Pos()).Line - 1 } -func (w lintFuncLength) countStmts(b []ast.Stmt) int { +func (r *FunctionLength) countStmts(b []ast.Stmt) int { count := 0 for _, s := range b { switch stmt := s.(type) { case *ast.BlockStmt: - count += w.countStmts(stmt.List) + count += r.countStmts(stmt.List) case *ast.IfStmt: - count += 1 + w.countBodyListStmts(stmt) + count += 1 + r.countBodyListStmts(stmt) if stmt.Else != nil { elseBody, ok := stmt.Else.(*ast.BlockStmt) if ok { - count += w.countStmts(elseBody.List) + count += r.countStmts(elseBody.List) } } case *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - count += 1 + w.countBodyListStmts(stmt) + count += 1 + r.countBodyListStmts(stmt) case *ast.CaseClause: - count += w.countStmts(stmt.Body) + count += r.countStmts(stmt.Body) case *ast.AssignStmt: - count += 1 + w.countFuncLitStmts(stmt.Rhs[0]) + count += 1 + r.countFuncLitStmts(stmt.Rhs[0]) case *ast.GoStmt: - count += 1 + w.countFuncLitStmts(stmt.Call.Fun) + count += 1 + r.countFuncLitStmts(stmt.Call.Fun) case *ast.DeferStmt: - count += 1 + w.countFuncLitStmts(stmt.Call.Fun) + count += 1 + r.countFuncLitStmts(stmt.Call.Fun) default: count++ } @@ -161,14 +144,15 @@ func (w lintFuncLength) countStmts(b []ast.Stmt) int { return count } -func (w lintFuncLength) countFuncLitStmts(stmt ast.Expr) int { +func (r *FunctionLength) countFuncLitStmts(stmt ast.Expr) int { if block, ok := stmt.(*ast.FuncLit); ok { - return w.countStmts(block.Body.List) + return r.countStmts(block.Body.List) } + return 0 } -func (w lintFuncLength) countBodyListStmts(t any) int { +func (r *FunctionLength) countBodyListStmts(t any) int { i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() - return w.countStmts(i.([]ast.Stmt)) + return r.countStmts(i.([]ast.Stmt)) } diff --git a/vendor/github.com/mgechev/revive/rule/function_result_limit.go b/vendor/github.com/mgechev/revive/rule/function_result_limit.go index 5b72f01ab..b5508f368 100644 --- a/vendor/github.com/mgechev/revive/rule/function_result_limit.go +++ b/vendor/github.com/mgechev/revive/rule/function_result_limit.go @@ -1,53 +1,43 @@ package rule import ( + "errors" "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) -// FunctionResultsLimitRule lints given else constructs. +// FunctionResultsLimitRule limits the maximum number of results a function can return. 
type FunctionResultsLimitRule struct { max int - - configureOnce sync.Once -} - -const defaultResultsLimit = 3 - -func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { - if len(arguments) < 1 { - r.max = defaultResultsLimit - return - } - - maxResults, ok := arguments[0].(int64) // Alt. non panicking version - if !ok { - panic(fmt.Sprintf(`invalid value passed as return results number to the "function-result-limit" rule; need int64 but got %T`, arguments[0])) - } - if maxResults < 0 { - panic(`the value passed as return results number to the "function-result-limit" rule cannot be negative`) - } - - r.max = int(maxResults) } // Apply applies the rule to given file. -func (r *FunctionResultsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *FunctionResultsLimitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } - walker := lintFunctionResultsNum{ - max: r.max, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } + num := 0 + hasResults := funcDecl.Type.Results != nil + if hasResults { + num = funcDecl.Type.Results.NumFields() + } - ast.Walk(walker, file.AST) + if num <= r.max { + continue + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("maximum number of return results per function exceeded; max %d but got %d", r.max, num), + Node: funcDecl.Type, + }) + } return failures } @@ -57,29 +47,25 @@ func (*FunctionResultsLimitRule) Name() string { return "function-result-limit" } -type lintFunctionResultsNum struct { - max int - onFailure func(lint.Failure) -} +const defaultResultsLimit = 3 -func (w lintFunctionResultsNum) Visit(n ast.Node) ast.Visitor { - node, ok := n.(*ast.FuncDecl) - if ok { - num := 0 - hasResults := node.Type.Results != nil - if hasResults { - num = node.Type.Results.NumFields() - } - if num > w.max { - w.onFailure(lint.Failure{ - Confidence: 1, - Failure: fmt.Sprintf("maximum number of return results per function exceeded; max %d but got %d", w.max, num), - Node: node.Type, - }) - } +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *FunctionResultsLimitRule) Configure(arguments lint.Arguments) error { + if len(arguments) < 1 { + r.max = defaultResultsLimit + return nil + } - return nil // skip visiting function's body + maxResults, ok := arguments[0].(int64) // Alt. non panicking version + if !ok { + return fmt.Errorf(`invalid value passed as return results number to the "function-result-limit" rule; need int64 but got %T`, arguments[0]) + } + if maxResults < 0 { + return errors.New(`the value passed as return results number to the "function-result-limit" rule cannot be negative`) } - return w + r.max = int(maxResults) + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/get_return.go b/vendor/github.com/mgechev/revive/rule/get_return.go index 06323a087..cf58a687c 100644 --- a/vendor/github.com/mgechev/revive/rule/get_return.go +++ b/vendor/github.com/mgechev/revive/rule/get_return.go @@ -8,19 +8,35 @@ import ( "github.com/mgechev/revive/lint" ) -// GetReturnRule lints given else constructs. +// GetReturnRule warns on getters that do not yield any result. 
type GetReturnRule struct{} // Apply applies the rule to given file. func (*GetReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) + for _, decl := range file.AST.Decls { + fd, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + + if !isGetter(fd.Name.Name) { + continue + } + + if hasResults(fd.Type.Results) { + continue + } + + failures = append(failures, lint.Failure{ + Confidence: 0.8, + Node: fd, + Category: lint.FailureCategoryLogic, + Failure: fmt.Sprintf("function '%s' seems to be a getter but it does not return any result", fd.Name.Name), + }) } - w := lintReturnRule{onFailure} - ast.Walk(w, file.AST) return failures } @@ -29,10 +45,6 @@ func (*GetReturnRule) Name() string { return "get-return" } -type lintReturnRule struct { - onFailure func(lint.Failure) -} - const getterPrefix = "GET" var lenGetterPrefix = len(getterPrefix) @@ -57,24 +69,3 @@ func isGetter(name string) bool { func hasResults(rs *ast.FieldList) bool { return rs != nil && len(rs.List) > 0 } - -func (w lintReturnRule) Visit(node ast.Node) ast.Visitor { - fd, ok := node.(*ast.FuncDecl) - if !ok { - return w - } - - if !isGetter(fd.Name.Name) { - return w - } - if !hasResults(fd.Type.Results) { - w.onFailure(lint.Failure{ - Confidence: 0.8, - Node: fd, - Category: "logic", - Failure: fmt.Sprintf("function '%s' seems to be a getter but it does not return any result", fd.Name.Name), - }) - } - - return w -} diff --git a/vendor/github.com/mgechev/revive/rule/identical_branches.go b/vendor/github.com/mgechev/revive/rule/identical_branches.go index c6008925f..044b04147 100644 --- a/vendor/github.com/mgechev/revive/rule/identical_branches.go +++ b/vendor/github.com/mgechev/revive/rule/identical_branches.go @@ -59,7 +59,7 @@ func (w *lintIdenticalBranches) Visit(node ast.Node) ast.Visitor { return w } -func (lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { +func (*lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { if len(branches) < 2 { return false // only one branch to compare thus we return } @@ -77,11 +77,11 @@ func (lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { return true } -func (w lintIdenticalBranches) newFailure(node ast.Node, msg string) { +func (w *lintIdenticalBranches) newFailure(node ast.Node, msg string) { w.onFailure(lint.Failure{ Confidence: 1, Node: node, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: msg, }) } diff --git a/vendor/github.com/mgechev/revive/rule/if_return.go b/vendor/github.com/mgechev/revive/rule/if_return.go index a6a3113ad..f9e5ef233 100644 --- a/vendor/github.com/mgechev/revive/rule/if_return.go +++ b/vendor/github.com/mgechev/revive/rule/if_return.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// IfReturnRule lints given else constructs. +// IfReturnRule searches for redundant `if` when returning an error. type IfReturnRule struct{} // Apply applies the rule to given file. 
diff --git a/vendor/github.com/mgechev/revive/rule/import_alias_naming.go b/vendor/github.com/mgechev/revive/rule/import_alias_naming.go index 043bf0d76..0859150a9 100644 --- a/vendor/github.com/mgechev/revive/rule/import_alias_naming.go +++ b/vendor/github.com/mgechev/revive/rule/import_alias_naming.go @@ -3,7 +3,6 @@ package rule import ( "fmt" "regexp" - "sync" "github.com/mgechev/revive/lint" ) @@ -12,47 +11,57 @@ import ( type ImportAliasNamingRule struct { allowRegexp *regexp.Regexp denyRegexp *regexp.Regexp - - configureOnce sync.Once } const defaultImportAliasNamingAllowRule = "^[a-z][a-z0-9]{0,}$" var defaultImportAliasNamingAllowRegexp = regexp.MustCompile(defaultImportAliasNamingAllowRule) -func (r *ImportAliasNamingRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ImportAliasNamingRule) Configure(arguments lint.Arguments) error { if len(arguments) == 0 { r.allowRegexp = defaultImportAliasNamingAllowRegexp - return + return nil } switch namingRule := arguments[0].(type) { case string: - r.setAllowRule(namingRule) + err := r.setAllowRule(namingRule) + if err != nil { + return err + } case map[string]any: // expecting map[string]string for k, v := range namingRule { switch k { case "allowRegex": - r.setAllowRule(v) + err := r.setAllowRule(v) + if err != nil { + return err + } case "denyRegex": - r.setDenyRule(v) + err := r.setDenyRule(v) + if err != nil { + return err + } + default: - panic(fmt.Sprintf("Invalid map key for 'import-alias-naming' rule. Expecting 'allowRegex' or 'denyRegex', got %v", k)) + return fmt.Errorf("invalid map key for 'import-alias-naming' rule. Expecting 'allowRegex' or 'denyRegex', got %v", k) } } default: - panic(fmt.Sprintf("Invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + return fmt.Errorf("invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0]) } if r.allowRegexp == nil && r.denyRegexp == nil { r.allowRegexp = defaultImportAliasNamingAllowRegexp } + return nil } // Apply applies the rule to given file. 
-func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *ImportAliasNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure for _, is := range file.AST.Imports { @@ -71,7 +80,7 @@ func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) Confidence: 1, Failure: fmt.Sprintf("import name (%s) must match the regular expression: %s", alias.Name, r.allowRegexp.String()), Node: alias, - Category: "imports", + Category: lint.FailureCategoryImports, }) } @@ -80,7 +89,7 @@ func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) Confidence: 1, Failure: fmt.Sprintf("import name (%s) must NOT match the regular expression: %s", alias.Name, r.denyRegexp.String()), Node: alias, - Category: "imports", + Category: lint.FailureCategoryImports, }) } } @@ -93,28 +102,30 @@ func (*ImportAliasNamingRule) Name() string { return "import-alias-naming" } -func (r *ImportAliasNamingRule) setAllowRule(value any) { +func (r *ImportAliasNamingRule) setAllowRule(value any) error { namingRule, ok := value.(string) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming allowRegexp rule. Expecting string, got %T", value, value)) + return fmt.Errorf("invalid argument '%v' for import-alias-naming allowRegexp rule. Expecting string, got %T", value, value) } namingRuleRegexp, err := regexp.Compile(namingRule) if err != nil { - panic(fmt.Sprintf("Invalid argument to the import-alias-naming allowRegexp rule. Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + return fmt.Errorf("invalid argument to the import-alias-naming allowRegexp rule. Expecting %q to be a valid regular expression, got: %w", namingRule, err) } r.allowRegexp = namingRuleRegexp + return nil } -func (r *ImportAliasNamingRule) setDenyRule(value any) { +func (r *ImportAliasNamingRule) setDenyRule(value any) error { namingRule, ok := value.(string) if !ok { - panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming denyRegexp rule. Expecting string, got %T", value, value)) + return fmt.Errorf("invalid argument '%v' for import-alias-naming denyRegexp rule. Expecting string, got %T", value, value) } namingRuleRegexp, err := regexp.Compile(namingRule) if err != nil { - panic(fmt.Sprintf("Invalid argument to the import-alias-naming denyRegexp rule. Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + return fmt.Errorf("invalid argument to the import-alias-naming denyRegexp rule. Expecting %q to be a valid regular expression, got: %w", namingRule, err) } r.denyRegexp = namingRuleRegexp + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/import_shadowing.go b/vendor/github.com/mgechev/revive/rule/import_shadowing.go index 046aeb688..69d17f2b1 100644 --- a/vendor/github.com/mgechev/revive/rule/import_shadowing.go +++ b/vendor/github.com/mgechev/revive/rule/import_shadowing.go @@ -9,7 +9,7 @@ import ( "github.com/mgechev/revive/lint" ) -// ImportShadowingRule lints given else constructs. +// ImportShadowingRule spots identifiers that shadow an import. type ImportShadowingRule struct{} // Apply applies the rule to given file. 
@@ -28,7 +28,7 @@ func (*ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Fail onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, - alreadySeen: map[*ast.Object]struct{}{}, + alreadySeen: map[*ast.Object]struct{}{}, // TODO: ast.Object is deprecated skipIdents: map[*ast.Ident]struct{}{}, } @@ -62,7 +62,7 @@ type importShadowing struct { packageNameIdent *ast.Ident importNames map[string]struct{} onFailure func(lint.Failure) - alreadySeen map[*ast.Object]struct{} + alreadySeen map[*ast.Object]struct{} // TODO: ast.Object is deprecated skipIdents map[*ast.Ident]struct{} } @@ -103,7 +103,7 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "naming", + Category: lint.FailureCategoryNaming, Failure: fmt.Sprintf("The name '%s' shadows an import name", id), }) diff --git a/vendor/github.com/mgechev/revive/rule/imports_blocklist.go b/vendor/github.com/mgechev/revive/rule/imports_blocklist.go index 18d77ca1c..c96382daf 100644 --- a/vendor/github.com/mgechev/revive/rule/imports_blocklist.go +++ b/vendor/github.com/mgechev/revive/rule/imports_blocklist.go @@ -3,33 +3,34 @@ package rule import ( "fmt" "regexp" - "sync" "github.com/mgechev/revive/lint" ) -// ImportsBlocklistRule lints given else constructs. +// ImportsBlocklistRule disallows importing the specified packages. type ImportsBlocklistRule struct { blocklist []*regexp.Regexp - - configureOnce sync.Once } var replaceImportRegexp = regexp.MustCompile(`/?\*\*/?`) -func (r *ImportsBlocklistRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ImportsBlocklistRule) Configure(arguments lint.Arguments) error { r.blocklist = []*regexp.Regexp{} for _, arg := range arguments { argStr, ok := arg.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg)) + return fmt.Errorf("invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg) } regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) if err != nil { - panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) + return fmt.Errorf("invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %w", argStr, err) } r.blocklist = append(r.blocklist, regStr) } + return nil } func (r *ImportsBlocklistRule) isBlocklisted(path string) bool { @@ -42,9 +43,7 @@ func (r *ImportsBlocklistRule) isBlocklisted(path string) bool { } // Apply applies the rule to given file. 
-func (r *ImportsBlocklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *ImportsBlocklistRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure for _, is := range file.AST.Imports { @@ -54,7 +53,7 @@ func (r *ImportsBlocklistRule) Apply(file *lint.File, arguments lint.Arguments) Confidence: 1, Failure: "should not use the following blocklisted import: " + path.Value, Node: is, - Category: "imports", + Category: lint.FailureCategoryImports, }) } } diff --git a/vendor/github.com/mgechev/revive/rule/increment_decrement.go b/vendor/github.com/mgechev/revive/rule/increment_decrement.go index 34a8e1ec5..d8cebcf25 100644 --- a/vendor/github.com/mgechev/revive/rule/increment_decrement.go +++ b/vendor/github.com/mgechev/revive/rule/increment_decrement.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// IncrementDecrementRule lints given else constructs. +// IncrementDecrementRule lints `i += 1` and `i -= 1` constructs. type IncrementDecrementRule struct{} // Apply applies the rule to given file. @@ -61,7 +61,7 @@ func (w lintIncrementDecrement) Visit(n ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 0.8, Node: as, - Category: "unary-op", + Category: lint.FailureCategoryUnaryOp, Failure: fmt.Sprintf("should replace %s with %s%s", w.file.Render(as), w.file.Render(as.Lhs[0]), suffix), }) return w diff --git a/vendor/github.com/mgechev/revive/rule/indent_error_flow.go b/vendor/github.com/mgechev/revive/rule/indent_error_flow.go index ebc1e793a..4dda64c14 100644 --- a/vendor/github.com/mgechev/revive/rule/indent_error_flow.go +++ b/vendor/github.com/mgechev/revive/rule/indent_error_flow.go @@ -5,12 +5,12 @@ import ( "github.com/mgechev/revive/lint" ) -// IndentErrorFlowRule lints given else constructs. +// IndentErrorFlowRule prevents redundant else statements. type IndentErrorFlowRule struct{} // Apply applies the rule to given file. func (e *IndentErrorFlowRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) + return ifelse.Apply(e.checkIfElse, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -18,28 +18,31 @@ func (*IndentErrorFlowRule) Name() string { return "indent-error-flow" } -// CheckIfElse evaluates the rule against an ifelse.Chain and returns a failure message if applicable. 
-func (*IndentErrorFlowRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) string { +func (*IndentErrorFlowRule) checkIfElse(chain ifelse.Chain, args ifelse.Args) (string, bool) { + if !chain.HasElse { + return "", false + } + if !chain.If.Deviates() { // this rule only applies if the if-block deviates control flow - return "" + return "", false } if chain.HasPriorNonDeviating { // if we de-indent the "else" block then a previous branch - // might flow into it, affecting program behaviour - return "" + // might flow into it, affecting program behavior + return "", false } if !chain.If.Returns() { // avoid overlapping with superfluous-else - return "" + return "", false } - if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls()) { // avoid increasing variable scope - return "" + return "", false } - return "if block ends with a return statement, so drop this else and outdent its block" + return "if block ends with a return statement, so drop this else and outdent its block", true } diff --git a/vendor/github.com/mgechev/revive/rule/line_length_limit.go b/vendor/github.com/mgechev/revive/rule/line_length_limit.go index 415761e1e..0c4c57691 100644 --- a/vendor/github.com/mgechev/revive/rule/line_length_limit.go +++ b/vendor/github.com/mgechev/revive/rule/line_length_limit.go @@ -3,42 +3,42 @@ package rule import ( "bufio" "bytes" + "errors" "fmt" "go/token" "strings" - "sync" "unicode/utf8" "github.com/mgechev/revive/lint" ) -// LineLengthLimitRule lints given else constructs. +// LineLengthLimitRule lints number of characters in a line. type LineLengthLimitRule struct { max int - - configureOnce sync.Once } const defaultLineLengthLimit = 80 -func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *LineLengthLimitRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.max = defaultLineLengthLimit - return + return nil } maxLength, ok := arguments[0].(int64) // Alt. non panicking version if !ok || maxLength < 0 { - panic(`invalid value passed as argument number to the "line-length-limit" rule`) + return errors.New(`invalid value passed as argument number to the "line-length-limit" rule`) } r.max = int(maxLength) + return nil } // Apply applies the rule to given file. -func (r *LineLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *LineLengthLimitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure checker := lintLineLengthNum{ @@ -76,7 +76,7 @@ func (r lintLineLengthNum) check() { c := utf8.RuneCountInString(t) if c > r.max { r.onFailure(lint.Failure{ - Category: "code-style", + Category: lint.FailureCategoryCodeStyle, Position: lint.FailurePosition{ // Offset not set; it is non-trivial, and doesn't appear to be needed. 
Start: token.Position{ diff --git a/vendor/github.com/mgechev/revive/rule/max_control_nesting.go b/vendor/github.com/mgechev/revive/rule/max_control_nesting.go index b2c5af70e..5bb11d098 100644 --- a/vendor/github.com/mgechev/revive/rule/max_control_nesting.go +++ b/vendor/github.com/mgechev/revive/rule/max_control_nesting.go @@ -1,26 +1,22 @@ package rule import ( + "errors" "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) -// MaxControlNestingRule lints given else constructs. +// MaxControlNestingRule sets restriction for maximum nesting of control structures. type MaxControlNestingRule struct { max int64 - - configureOnce sync.Once } const defaultMaxControlNesting = 5 // Apply applies the rule to given file. -func (r *MaxControlNestingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *MaxControlNestingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure fileAst := file.AST @@ -55,7 +51,7 @@ func (w *lintMaxControlNesting) Visit(n ast.Node) ast.Visitor { Failure: fmt.Sprintf("control flow nesting exceeds %d", w.max), Confidence: 1, Node: w.lastCtrlStmt, - Category: "complexity", + Category: lint.FailureCategoryComplexity, }) return nil // stop visiting deeper } @@ -107,17 +103,24 @@ func (w *lintMaxControlNesting) walkControlledBlock(b ast.Node) { w.nestingLevelAcc = oldNestingLevel } -func (r *MaxControlNestingRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *MaxControlNestingRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.max = defaultMaxControlNesting - return + return nil } - checkNumberOfArguments(1, arguments, r.Name()) + check := checkNumberOfArguments(1, arguments, r.Name()) + if check != nil { + return check + } maxNesting, ok := arguments[0].(int64) // Alt. non panicking version if !ok { - panic(`invalid value passed as argument number to the "max-control-nesting" rule`) + return errors.New(`invalid value passed as argument number to the "max-control-nesting" rule`) } r.max = maxNesting + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/max_public_structs.go b/vendor/github.com/mgechev/revive/rule/max_public_structs.go index d6f91e375..f27edd7e6 100644 --- a/vendor/github.com/mgechev/revive/rule/max_public_structs.go +++ b/vendor/github.com/mgechev/revive/rule/max_public_structs.go @@ -1,42 +1,45 @@ package rule import ( + "errors" "fmt" "go/ast" "strings" - "sync" "github.com/mgechev/revive/lint" ) -// MaxPublicStructsRule lints given else constructs. +// MaxPublicStructsRule lints the number of public structs in a file. type MaxPublicStructsRule struct { max int64 - - configureOnce sync.Once } const defaultMaxPublicStructs = 5 -func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *MaxPublicStructsRule) Configure(arguments lint.Arguments) error { if len(arguments) < 1 { r.max = defaultMaxPublicStructs - return + return nil } - checkNumberOfArguments(1, arguments, r.Name()) + err := checkNumberOfArguments(1, arguments, r.Name()) + if err != nil { + return err + } maxStructs, ok := arguments[0].(int64) // Alt. 
non panicking version if !ok { - panic(`invalid value passed as argument number to the "max-public-structs" rule`) + return errors.New(`invalid value passed as argument number to the "max-public-structs" rule`) } r.max = maxStructs + return nil } // Apply applies the rule to given file. -func (r *MaxPublicStructsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *MaxPublicStructsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure if r.max < 1 { @@ -59,7 +62,7 @@ func (r *MaxPublicStructsRule) Apply(file *lint.File, arguments lint.Arguments) Failure: fmt.Sprintf("you have exceeded the maximum number (%d) of public struct declarations", r.max), Confidence: 1, Node: fileAst, - Category: "style", + Category: lint.FailureCategoryStyle, }) } diff --git a/vendor/github.com/mgechev/revive/rule/modifies_param.go b/vendor/github.com/mgechev/revive/rule/modifies_param.go index a68ae2501..da509087d 100644 --- a/vendor/github.com/mgechev/revive/rule/modifies_param.go +++ b/vendor/github.com/mgechev/revive/rule/modifies_param.go @@ -7,7 +7,7 @@ import ( "github.com/mgechev/revive/lint" ) -// ModifiesParamRule lints given else constructs. +// ModifiesParamRule warns on assignments to function parameters. type ModifiesParamRule struct{} // Apply applies the rule to given file. @@ -73,7 +73,7 @@ func checkParam(id *ast.Ident, w *lintModifiesParamRule) { w.onFailure(lint.Failure{ Confidence: 0.5, // confidence is low because of shadow variables Node: id, - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Failure: fmt.Sprintf("parameter '%s' seems to be modified", id), }) } diff --git a/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go b/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go index 2f92991f5..9af91099f 100644 --- a/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go +++ b/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go @@ -12,99 +12,35 @@ import ( type ModifiesValRecRule struct{} // Apply applies the rule to given file. -func (*ModifiesValRecRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *ModifiesValRecRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintModifiesValRecRule{file: file, onFailure: onFailure} file.Pkg.TypeCheck() - ast.Walk(w, file.AST) - - return failures -} - -// Name returns the rule name. 
-func (*ModifiesValRecRule) Name() string { - return "modifies-value-receiver" -} - -type lintModifiesValRecRule struct { - file *lint.File - onFailure func(lint.Failure) -} - -func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.FuncDecl: - if n.Recv == nil { - return nil // skip, not a method - } - - receiver := n.Recv.List[0] - if _, ok := receiver.Type.(*ast.StarExpr); ok { - return nil // skip, method with pointer receiver + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + isAMethod := ok && funcDecl.Recv != nil + if !isAMethod { + continue // skip, not a method } - if w.skipType(receiver.Type) { - return nil // skip, receiver is a map or array - } - - if len(receiver.Names) < 1 { - return nil // skip, anonymous receiver + receiver := funcDecl.Recv.List[0] + if r.mustSkip(receiver, file.Pkg) { + continue } receiverName := receiver.Names[0].Name - if receiverName == "_" { - return nil // skip, anonymous receiver - } - - receiverAssignmentFinder := func(n ast.Node) bool { - // look for assignments with the receiver in the right hand - assignment, ok := n.(*ast.AssignStmt) - if !ok { - return false - } - - for _, exp := range assignment.Lhs { - switch e := exp.(type) { - case *ast.IndexExpr: // receiver...[] = ... - continue - case *ast.StarExpr: // *receiver = ... - continue - case *ast.SelectorExpr: // receiver.field = ... - name := w.getNameFromExpr(e.X) - if name == "" || name != receiverName { - continue - } - case *ast.Ident: // receiver := ... - if e.Name != receiverName { - continue - } - default: - continue - } - - return true - } - - return false - } - - assignmentsToReceiver := pick(n.Body, receiverAssignmentFinder) + assignmentsToReceiver := r.getReceiverModifications(receiverName, funcDecl.Body) if len(assignmentsToReceiver) == 0 { - return nil // receiver is not modified + continue // receiver is not modified } - methodReturnsReceiver := len(w.findReturnReceiverStatements(receiverName, n.Body)) > 0 + methodReturnsReceiver := len(r.findReturnReceiverStatements(receiverName, funcDecl.Body)) > 0 if methodReturnsReceiver { - return nil // modification seems legit (see issue #1066) + continue // modification seems legit (see issue #1066) } for _, assignment := range assignmentsToReceiver { - w.onFailure(lint.Failure{ + failures = append(failures, lint.Failure{ Node: assignment, Confidence: 1, Failure: "suspicious assignment to a by-value method receiver", @@ -112,11 +48,16 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { } } - return w + return failures +} + +// Name returns the rule name. 
+func (*ModifiesValRecRule) Name() string { + return "modifies-value-receiver" } -func (w lintModifiesValRecRule) skipType(t ast.Expr) bool { - rt := w.file.Pkg.TypeOf(t) +func (*ModifiesValRecRule) skipType(t ast.Expr, pkg *lint.Package) bool { + rt := pkg.TypeOf(t) if rt == nil { return false } @@ -128,7 +69,7 @@ func (w lintModifiesValRecRule) skipType(t ast.Expr) bool { return strings.HasPrefix(rtName, "[]") || strings.HasPrefix(rtName, "map[") } -func (lintModifiesValRecRule) getNameFromExpr(ie ast.Expr) string { +func (*ModifiesValRecRule) getNameFromExpr(ie ast.Expr) string { ident, ok := ie.(*ast.Ident) if !ok { return "" @@ -137,7 +78,7 @@ func (lintModifiesValRecRule) getNameFromExpr(ie ast.Expr) string { return ident.Name } -func (w lintModifiesValRecRule) findReturnReceiverStatements(receiverName string, target ast.Node) []ast.Node { +func (r *ModifiesValRecRule) findReturnReceiverStatements(receiverName string, target ast.Node) []ast.Node { finder := func(n ast.Node) bool { // look for returns with the receiver as value returnStatement, ok := n.(*ast.ReturnStmt) @@ -148,7 +89,7 @@ func (w lintModifiesValRecRule) findReturnReceiverStatements(receiverName string for _, exp := range returnStatement.Results { switch e := exp.(type) { case *ast.SelectorExpr: // receiver.field = ... - name := w.getNameFromExpr(e.X) + name := r.getNameFromExpr(e.X) if name == "" || name != receiverName { continue } @@ -160,7 +101,7 @@ func (w lintModifiesValRecRule) findReturnReceiverStatements(receiverName string if e.Op != token.AND { continue } - name := w.getNameFromExpr(e.X) + name := r.getNameFromExpr(e.X) if name == "" || name != receiverName { continue } @@ -177,3 +118,66 @@ func (w lintModifiesValRecRule) findReturnReceiverStatements(receiverName string return pick(target, finder) } + +func (r *ModifiesValRecRule) mustSkip(receiver *ast.Field, pkg *lint.Package) bool { + if _, ok := receiver.Type.(*ast.StarExpr); ok { + return true // skip, method with pointer receiver + } + + if len(receiver.Names) < 1 { + return true // skip, anonymous receiver + } + + receiverName := receiver.Names[0].Name + if receiverName == "_" { + return true // skip, anonymous receiver + } + + if r.skipType(receiver.Type, pkg) { + return true // skip, receiver is a map or array + } + + return false +} + +func (r *ModifiesValRecRule) getReceiverModifications(receiverName string, funcBody *ast.BlockStmt) []ast.Node { + receiverModificationFinder := func(n ast.Node) bool { + switch node := n.(type) { + case *ast.IncDecStmt: + se, ok := node.X.(*ast.SelectorExpr) + if !ok { + return false + } + + name := r.getNameFromExpr(se.X) + return name == receiverName + case *ast.AssignStmt: + // look for assignments with the receiver in the right hand + for _, exp := range node.Lhs { + switch e := exp.(type) { + case *ast.IndexExpr: // receiver...[] = ... + continue + case *ast.StarExpr: // *receiver = ... + continue + case *ast.SelectorExpr: // receiver.field = ... + name := r.getNameFromExpr(e.X) + if name == "" || name != receiverName { + continue + } + case *ast.Ident: // receiver := ... 
+ if e.Name != receiverName { + continue + } + default: + continue + } + + return true + } + } + + return false + } + + return pick(funcBody, receiverModificationFinder) +} diff --git a/vendor/github.com/mgechev/revive/rule/nested_structs.go b/vendor/github.com/mgechev/revive/rule/nested_structs.go index 147bd482b..49e240b6f 100644 --- a/vendor/github.com/mgechev/revive/rule/nested_structs.go +++ b/vendor/github.com/mgechev/revive/rule/nested_structs.go @@ -68,7 +68,7 @@ func (l *lintStruct) Visit(n ast.Node) ast.Visitor { func (l *lintStruct) fail(n ast.Node) { l.onFailure(lint.Failure{ Failure: "no nested structs are allowed", - Category: "style", + Category: lint.FailureCategoryStyle, Node: n, Confidence: 1, }) diff --git a/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go b/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go index 43d982d6b..c9297d2d4 100644 --- a/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go +++ b/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// OptimizeOperandsOrderRule lints given else constructs. +// OptimizeOperandsOrderRule checks inefficient conditional expressions. type OptimizeOperandsOrderRule struct{} // Apply applies the rule to given file. @@ -78,7 +78,7 @@ func (w lintOptimizeOperandsOrderlExpr) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Failure: fmt.Sprintf("for better performance '%v' might be rewritten as '%v'", gofmt(binExpr), gofmt(&newExpr)), Node: node, - Category: "optimization", + Category: lint.FailureCategoryOptimization, Confidence: 0.3, }) diff --git a/vendor/github.com/mgechev/revive/rule/package_comments.go b/vendor/github.com/mgechev/revive/rule/package_comments.go index f1e5462fe..20afee88e 100644 --- a/vendor/github.com/mgechev/revive/rule/package_comments.go +++ b/vendor/github.com/mgechev/revive/rule/package_comments.go @@ -88,7 +88,7 @@ func (l *lintPackageComments) checkPackageComment() []lint.Failure { if docFile != nil { pkgFile := l.file.Pkg.Files()[fileSource] return []lint.Failure{{ - Category: "comments", + Category: lint.FailureCategoryComments, Position: lint.FailurePosition{ Start: pkgFile.ToPosition(docFile.Pos()), End: pkgFile.ToPosition(docFile.Name.End()), @@ -131,7 +131,7 @@ func (l *lintPackageComments) Visit(_ ast.Node) ast.Visitor { Column: 1, } l.onFailure(lint.Failure{ - Category: "comments", + Category: lint.FailureCategoryComments, Position: lint.FailurePosition{ Start: pos, End: pos, @@ -154,7 +154,7 @@ func (l *lintPackageComments) Visit(_ ast.Node) ast.Visitor { // Only non-main packages need to keep to this form. if !l.file.Pkg.IsMain() && !strings.HasPrefix(s, prefix) && !isDirectiveComment(s) { l.onFailure(lint.Failure{ - Category: "comments", + Category: lint.FailureCategoryComments, Node: l.fileAst.Doc, Confidence: 1, Failure: fmt.Sprintf(`package comment should be of the form "%s..."`, prefix), diff --git a/vendor/github.com/mgechev/revive/rule/range.go b/vendor/github.com/mgechev/revive/rule/range.go index 9d483a673..b54078e4d 100644 --- a/vendor/github.com/mgechev/revive/rule/range.go +++ b/vendor/github.com/mgechev/revive/rule/range.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// RangeRule lints given else constructs. +// RangeRule prevents redundant variables when iterating over a collection. type RangeRule struct{} // Apply applies the rule to given file. 
diff --git a/vendor/github.com/mgechev/revive/rule/range_val_address.go b/vendor/github.com/mgechev/revive/rule/range_val_address.go index d2ab0392a..239175c6e 100644 --- a/vendor/github.com/mgechev/revive/rule/range_val_address.go +++ b/vendor/github.com/mgechev/revive/rule/range_val_address.go @@ -70,7 +70,7 @@ func (w rangeValAddress) Visit(node ast.Node) ast.Visitor { type rangeBodyVisitor struct { valueIsStarExpr bool - valueID *ast.Object + valueID *ast.Object // TODO: ast.Object is deprecated onFailure func(lint.Failure) } @@ -140,7 +140,7 @@ func (bw rangeBodyVisitor) isAccessingRangeValueAddress(exp ast.Expr) bool { v, ok := u.X.(*ast.Ident) if !ok { var s *ast.SelectorExpr - s, ok = u.X.(*ast.SelectorExpr) + s, ok = u.X.(*ast.SelectorExpr) // TODO: possible BUG: if it's `=` and not `:=`, it means that in the last return `ok` is always true if !ok { return false } @@ -154,7 +154,7 @@ func (bw rangeBodyVisitor) isAccessingRangeValueAddress(exp ast.Expr) bool { } } - return ok && v.Obj == bw.valueID + return ok && v.Obj == bw.valueID // TODO: ok is always true due to the previous TODO remark } func (bw rangeBodyVisitor) newFailure(node ast.Node) lint.Failure { diff --git a/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go b/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go index 6f9255a74..92078288f 100644 --- a/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go +++ b/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go @@ -7,7 +7,7 @@ import ( "github.com/mgechev/revive/lint" ) -// RangeValInClosureRule lints given else constructs. +// RangeValInClosureRule warns if range value is used in a closure dispatched as goroutine. type RangeValInClosureRule struct{} // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/receiver_naming.go b/vendor/github.com/mgechev/revive/rule/receiver_naming.go index c83bacc2f..0327feed6 100644 --- a/vendor/github.com/mgechev/revive/rule/receiver_naming.go +++ b/vendor/github.com/mgechev/revive/rule/receiver_naming.go @@ -3,30 +3,29 @@ package rule import ( "fmt" "go/ast" - "sync" "github.com/mgechev/revive/internal/typeparams" "github.com/mgechev/revive/lint" ) -// ReceiverNamingRule lints given else constructs. +// ReceiverNamingRule lints a receiver name. type ReceiverNamingRule struct { receiverNameMaxLength int - - configureOnce sync.Once } const defaultReceiverNameMaxLength = -1 // thus will not check - -func (r *ReceiverNamingRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *ReceiverNamingRule) Configure(arguments lint.Arguments) error { r.receiverNameMaxLength = defaultReceiverNameMaxLength if len(arguments) < 1 { - return + return nil } args, ok := arguments[0].(map[string]any) if !ok { - panic(fmt.Sprintf("Unable to get arguments for rule %s. Expected object of key-value-pairs.", r.Name())) + return fmt.Errorf("unable to get arguments for rule %s. 
Expected object of key-value-pairs", r.Name()) } for k, v := range args { @@ -34,31 +33,75 @@ func (r *ReceiverNamingRule) configure(arguments lint.Arguments) { case "maxLength": value, ok := v.(int64) if !ok { - panic(fmt.Sprintf("Invalid value %v for argument %s of rule %s, expected integer value got %T", v, k, r.Name(), v)) + return fmt.Errorf("invalid value %v for argument %s of rule %s, expected integer value got %T", v, k, r.Name(), v) } r.receiverNameMaxLength = int(value) default: - panic(fmt.Sprintf("Unknown argument %s for %s rule.", k, r.Name())) + return fmt.Errorf("unknown argument %s for %s rule", k, r.Name()) } } + return nil } // Apply applies the rule to given file. -func (r *ReceiverNamingRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *ReceiverNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + typeReceiver := map[string]string{} var failures []lint.Failure + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + continue + } - fileAst := file.AST - walker := lintReceiverName{ - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - typeReceiver: map[string]string{}, - receiverNameMaxLength: r.receiverNameMaxLength, - } + names := fn.Recv.List[0].Names + if len(names) < 1 { + continue + } + name := names[0].Name + + if name == "_" { + failures = append(failures, lint.Failure{ + Node: decl, + Confidence: 1, + Category: lint.FailureCategoryNaming, + Failure: "receiver name should not be an underscore, omit the name if it is unused", + }) + continue + } - ast.Walk(walker, fileAst) + if name == "this" || name == "self" { + failures = append(failures, lint.Failure{ + Node: decl, + Confidence: 1, + Category: lint.FailureCategoryNaming, + Failure: `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`, + }) + continue + } + + if r.receiverNameMaxLength > 0 && len([]rune(name)) > r.receiverNameMaxLength { + failures = append(failures, lint.Failure{ + Node: decl, + Confidence: 1, + Category: lint.FailureCategoryNaming, + Failure: fmt.Sprintf("receiver name %s is longer than %d characters", name, r.receiverNameMaxLength), + }) + continue + } + + recv := typeparams.ReceiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + failures = append(failures, lint.Failure{ + Node: decl, + Confidence: 1, + Category: lint.FailureCategoryNaming, + Failure: fmt.Sprintf("receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv), + }) + continue + } + + typeReceiver[recv] = name + } return failures } @@ -67,62 +110,3 @@ func (r *ReceiverNamingRule) Apply(file *lint.File, args lint.Arguments) []lint. 
func (*ReceiverNamingRule) Name() string { return "receiver-naming" } - -type lintReceiverName struct { - onFailure func(lint.Failure) - typeReceiver map[string]string - receiverNameMaxLength int -} - -func (w lintReceiverName) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { - return w - } - names := fn.Recv.List[0].Names - if len(names) < 1 { - return w - } - name := names[0].Name - if name == "_" { - w.onFailure(lint.Failure{ - Node: n, - Confidence: 1, - Category: "naming", - Failure: "receiver name should not be an underscore, omit the name if it is unused", - }) - return w - } - if name == "this" || name == "self" { - w.onFailure(lint.Failure{ - Node: n, - Confidence: 1, - Category: "naming", - Failure: `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`, - }) - return w - } - - if w.receiverNameMaxLength > 0 && len([]rune(name)) > w.receiverNameMaxLength { - w.onFailure(lint.Failure{ - Node: n, - Confidence: 1, - Category: "naming", - Failure: fmt.Sprintf("receiver name %s is longer than %d characters", name, w.receiverNameMaxLength), - }) - return w - } - - recv := typeparams.ReceiverType(fn) - if prev, ok := w.typeReceiver[recv]; ok && prev != name { - w.onFailure(lint.Failure{ - Node: n, - Confidence: 1, - Category: "naming", - Failure: fmt.Sprintf("receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv), - }) - return w - } - w.typeReceiver[recv] = name - return w -} diff --git a/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go b/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go index 10ea16ae1..62f8d7a21 100644 --- a/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go +++ b/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go @@ -198,11 +198,11 @@ func (w *lintRedefinesBuiltinID) Visit(node ast.Node) ast.Visitor { return w } -func (w lintRedefinesBuiltinID) addFailure(node ast.Node, msg string) { +func (w *lintRedefinesBuiltinID) addFailure(node ast.Node, msg string) { w.onFailure(lint.Failure{ Confidence: 1, Node: node, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: msg, }) } diff --git a/vendor/github.com/mgechev/revive/rule/redundant_build_tag.go b/vendor/github.com/mgechev/revive/rule/redundant_build_tag.go new file mode 100644 index 000000000..d195ce6e4 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/redundant_build_tag.go @@ -0,0 +1,41 @@ +package rule + +import ( + "strings" + + "github.com/mgechev/revive/lint" +) + +// RedundantBuildTagRule lints the presence of redundant build tags. +type RedundantBuildTagRule struct{} + +// Apply triggers if an old build tag `// +build` is found after a new one `//go:build`. +// `//go:build` comments are automatically added by gofmt when Go 1.17+ is used. 
+// See https://pkg.go.dev/cmd/go#hdr-Build_constraints +func (*RedundantBuildTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + for _, group := range file.AST.Comments { + hasGoBuild := false + for _, comment := range group.List { + if strings.HasPrefix(comment.Text, "//go:build ") { + hasGoBuild = true + continue + } + + if hasGoBuild && strings.HasPrefix(comment.Text, "// +build ") { + return []lint.Failure{{ + Category: lint.FailureCategoryStyle, + Confidence: 1, + Node: comment, + Failure: `The build tag "// +build" is redundant since Go 1.17 and can be removed`, + }} + } + } + } + + return []lint.Failure{} +} + +// Name returns the rule name. +func (*RedundantBuildTagRule) Name() string { + return "redundant-build-tag" +} diff --git a/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go b/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go index fa5281f24..692507a27 100644 --- a/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go +++ b/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go @@ -8,7 +8,7 @@ import ( "github.com/mgechev/revive/lint" ) -// RedundantImportAlias lints given else constructs. +// RedundantImportAlias warns on import aliases matching the imported package name. type RedundantImportAlias struct{} // Apply applies the rule to given file. @@ -25,7 +25,7 @@ func (*RedundantImportAlias) Apply(file *lint.File, _ lint.Arguments) []lint.Fai Confidence: 1, Failure: fmt.Sprintf("Import alias \"%s\" is redundant", imp.Name.Name), Node: imp, - Category: "imports", + Category: lint.FailureCategoryImports, }) } } diff --git a/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go b/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go new file mode 100644 index 000000000..d456aa215 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/redundant_test_main_exit.go @@ -0,0 +1,79 @@ +package rule + +import ( + "fmt" + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// RedundantTestMainExitRule suggests removing Exit call in TestMain function for test files. +type RedundantTestMainExitRule struct{} + +// Apply applies the rule to given file. +func (*RedundantTestMainExitRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + if !file.IsTest() || !file.Pkg.IsAtLeastGo115() { + // skip analysis for non-test files or for Go versions before 1.15 + return failures + } + + onFailure := func(failure lint.Failure) { + failures = append(failures, failure) + } + + w := &lintRedundantTestMainExit{onFailure: onFailure} + ast.Walk(w, file.AST) + return failures +} + +// Name returns the rule name. 
+func (*RedundantTestMainExitRule) Name() string { + return "redundant-test-main-exit" +} + +type lintRedundantTestMainExit struct { + onFailure func(lint.Failure) +} + +func (w *lintRedundantTestMainExit) Visit(node ast.Node) ast.Visitor { + if fd, ok := node.(*ast.FuncDecl); ok { + if fd.Name.Name != "TestMain" { + return nil // skip analysis for other functions than TestMain + } + + return w + } + + se, ok := node.(*ast.ExprStmt) + if !ok { + return w + } + ce, ok := se.X.(*ast.CallExpr) + if !ok { + return w + } + + fc, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return w + } + id, ok := fc.X.(*ast.Ident) + if !ok { + return w + } + + pkg := id.Name + fn := fc.Sel.Name + if isCallToExitFunction(pkg, fn) { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: ce, + Category: lint.FailureCategoryStyle, + Failure: fmt.Sprintf("redundant call to %s.%s in TestMain function, the test runner will handle it automatically as of Go 1.15", pkg, fn), + }) + } + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/string_format.go b/vendor/github.com/mgechev/revive/rule/string_format.go index ecac3fa7c..a3beac43d 100644 --- a/vendor/github.com/mgechev/revive/rule/string_format.go +++ b/vendor/github.com/mgechev/revive/rule/string_format.go @@ -11,8 +11,6 @@ import ( "github.com/mgechev/revive/lint" ) -// #region Revive API - // StringFormatRule lints strings and/or comments according to a set of regular expressions given as Arguments type StringFormatRule struct{} @@ -24,8 +22,12 @@ func (*StringFormatRule) Apply(file *lint.File, arguments lint.Arguments) []lint failures = append(failures, failure) } - w := lintStringFormatRule{onFailure: onFailure} - w.parseArguments(arguments) + w := &lintStringFormatRule{onFailure: onFailure} + err := w.parseArguments(arguments) + if err != nil { + return newInternalFailureError(err) + } + ast.Walk(w, file.AST) return failures @@ -37,16 +39,15 @@ func (*StringFormatRule) Name() string { } // ParseArgumentsTest is a public wrapper around w.parseArguments used for testing. 
Returns the error message provided to panic, or nil if no error was encountered -func (StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { +func (*StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { w := lintStringFormatRule{} c := make(chan any) // Parse the arguments in a goroutine, defer a recover() call, return the error encountered (or nil if there was no error) go func() { defer func() { - err := recover() + err := w.parseArguments(arguments) c <- err }() - w.parseArguments(arguments) }() err := <-c if err != nil { @@ -56,10 +57,6 @@ func (StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { return nil } -// #endregion - -// #region Internal structure - type lintStringFormatRule struct { onFailure func(lint.Failure) rules []stringFormatSubrule @@ -87,13 +84,12 @@ const identRegex = "[_A-Za-z][_A-Za-z0-9]*" var parseStringFormatScope = regexp.MustCompile( fmt.Sprintf("^(%s(?:\\.%s)?)(?:\\[([0-9]+)\\](?:\\.(%s))?)?$", identRegex, identRegex, identRegex)) -// #endregion - -// #region Argument parsing - -func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) { +func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) error { for i, argument := range arguments { - scopes, regex, negated, errorMessage := w.parseArgument(argument, i) + scopes, regex, negated, errorMessage, err := w.parseArgument(argument, i) + if err != nil { + return err + } w.rules = append(w.rules, stringFormatSubrule{ parent: w, scopes: scopes, @@ -102,30 +98,31 @@ func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) { errorMessage: errorMessage, }) } + return nil } -func (w lintStringFormatRule) parseArgument(argument any, ruleNum int) (scopes stringFormatSubruleScopes, regex *regexp.Regexp, negated bool, errorMessage string) { +func (w *lintStringFormatRule) parseArgument(argument any, ruleNum int) (scopes stringFormatSubruleScopes, regex *regexp.Regexp, negated bool, errorMessage string, err error) { g, ok := argument.([]any) // Cast to generic slice first if !ok { - w.configError("argument is not a slice", ruleNum, 0) + return stringFormatSubruleScopes{}, regex, false, "", w.configError("argument is not a slice", ruleNum, 0) } if len(g) < 2 { - w.configError("less than two slices found in argument, scope and regex are required", ruleNum, len(g)-1) + return stringFormatSubruleScopes{}, regex, false, "", w.configError("less than two slices found in argument, scope and regex are required", ruleNum, len(g)-1) } rule := make([]string, len(g)) for i, obj := range g { val, ok := obj.(string) if !ok { - w.configError("unexpected value, string was expected", ruleNum, i) + return stringFormatSubruleScopes{}, regex, false, "", w.configError("unexpected value, string was expected", ruleNum, i) } rule[i] = val } // Validate scope and regex length if rule[0] == "" { - w.configError("empty scope provided", ruleNum, 0) + return stringFormatSubruleScopes{}, regex, false, "", w.configError("empty scope provided", ruleNum, 0) } else if len(rule[1]) < 2 { - w.configError("regex is too small (regexes should begin and end with '/')", ruleNum, 1) + return stringFormatSubruleScopes{}, regex, false, "", w.configError("regex is too small (regexes should begin and end with '/')", ruleNum, 1) } // Parse rule scopes @@ -136,24 +133,25 @@ func (w lintStringFormatRule) parseArgument(argument any, ruleNum int) (scopes s rawScope = strings.TrimSpace(rawScope) if len(rawScope) == 0 { - w.parseScopeError("empty scope in rule scopes:", 
ruleNum, 0, scopeNum) + return stringFormatSubruleScopes{}, regex, false, "", w.parseScopeError("empty scope in rule scopes:", ruleNum, 0, scopeNum) } scope := stringFormatSubruleScope{} matches := parseStringFormatScope.FindStringSubmatch(rawScope) if matches == nil { // The rule's scope didn't match the parsing regex at all, probably a configuration error - w.parseScopeError("unable to parse rule scope", ruleNum, 0, scopeNum) + return stringFormatSubruleScopes{}, regex, false, "", w.parseScopeError("unable to parse rule scope", ruleNum, 0, scopeNum) } else if len(matches) != 4 { // The rule's scope matched the parsing regex, but an unexpected number of submatches was returned, probably a bug - w.parseScopeError(fmt.Sprintf("unexpected number of submatches when parsing scope: %d, expected 4", len(matches)), ruleNum, 0, scopeNum) + return stringFormatSubruleScopes{}, regex, false, "", + w.parseScopeError(fmt.Sprintf("unexpected number of submatches when parsing scope: %d, expected 4", len(matches)), ruleNum, 0, scopeNum) } scope.funcName = matches[1] if len(matches[2]) > 0 { var err error scope.argument, err = strconv.Atoi(matches[2]) if err != nil { - w.parseScopeError("unable to parse argument number in rule scope", ruleNum, 0, scopeNum) + return stringFormatSubruleScopes{}, regex, false, "", w.parseScopeError("unable to parse argument number in rule scope", ruleNum, 0, scopeNum) } } if len(matches[3]) > 0 { @@ -169,38 +167,34 @@ func (w lintStringFormatRule) parseArgument(argument any, ruleNum int) (scopes s if negated { offset++ } - regex, err := regexp.Compile(rule[1][offset : len(rule[1])-1]) - if err != nil { - w.parseError(fmt.Sprintf("unable to compile %s as regexp", rule[1]), ruleNum, 1) + regex, errr := regexp.Compile(rule[1][offset : len(rule[1])-1]) + if errr != nil { + return stringFormatSubruleScopes{}, regex, false, "", w.parseError(fmt.Sprintf("unable to compile %s as regexp", rule[1]), ruleNum, 1) } // Use custom error message if provided if len(rule) == 3 { errorMessage = rule[2] } - return scopes, regex, negated, errorMessage + return scopes, regex, negated, errorMessage, nil } // Report an invalid config, this is specifically the user's fault -func (lintStringFormatRule) configError(msg string, ruleNum, option int) { - panic(fmt.Sprintf("invalid configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option)) +func (*lintStringFormatRule) configError(msg string, ruleNum, option int) error { + return fmt.Errorf("invalid configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option) } // Report a general config parsing failure, this may be the user's fault, but it isn't known for certain -func (lintStringFormatRule) parseError(msg string, ruleNum, option int) { - panic(fmt.Sprintf("failed to parse configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option)) +func (*lintStringFormatRule) parseError(msg string, ruleNum, option int) error { + return fmt.Errorf("failed to parse configuration for string-format: %s [argument %d, option %d]", msg, ruleNum, option) } // Report a general scope config parsing failure, this may be the user's fault, but it isn't known for certain -func (lintStringFormatRule) parseScopeError(msg string, ruleNum, option, scopeNum int) { - panic(fmt.Sprintf("failed to parse configuration for string-format: %s [argument %d, option %d, scope index %d]", msg, ruleNum, option, scopeNum)) +func (*lintStringFormatRule) parseScopeError(msg string, ruleNum, option, scopeNum int) error { + 
return fmt.Errorf("failed to parse configuration for string-format: %s [argument %d, option %d, scope index %d]", msg, ruleNum, option, scopeNum) } -// #endregion - -// #region Node traversal - -func (w lintStringFormatRule) Visit(node ast.Node) ast.Visitor { +func (w *lintStringFormatRule) Visit(node ast.Node) ast.Visitor { // First, check if node is a call expression call, ok := node.(*ast.CallExpr) if !ok { @@ -225,7 +219,7 @@ func (w lintStringFormatRule) Visit(node ast.Node) ast.Visitor { } // Return the name of a call expression in the form of package.Func or Func -func (lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok bool) { +func (*lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok bool) { if ident, ok := call.Fun.(*ast.Ident); ok { // Local function call return ident.Name, true @@ -247,10 +241,6 @@ func (lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok return "", false } -// #endregion - -// #region Linting logic - // apply a single format rule to a call expression (should be done after verifying the that the call expression matches the rule's scope) func (r *stringFormatSubrule) apply(call *ast.CallExpr, scope *stringFormatSubruleScope) { if len(call.Args) <= scope.argument { @@ -289,6 +279,12 @@ func (r *stringFormatSubrule) apply(call *ast.CallExpr, scope *stringFormatSubru return } } + + // extra safety check + if lit == nil { + return + } + // Unquote the string literal before linting unquoted := lit.Value[1 : len(lit.Value)-1] if r.stringIsOK(unquoted) { @@ -324,5 +320,3 @@ func (r *stringFormatSubrule) generateFailure(node ast.Node) { Node: node, }) } - -// #endregion diff --git a/vendor/github.com/mgechev/revive/rule/struct_tag.go b/vendor/github.com/mgechev/revive/rule/struct_tag.go index 4dd927827..00a2b964c 100644 --- a/vendor/github.com/mgechev/revive/rule/struct_tag.go +++ b/vendor/github.com/mgechev/revive/rule/struct_tag.go @@ -5,7 +5,6 @@ import ( "go/ast" "strconv" "strings" - "sync" "github.com/fatih/structtag" "github.com/mgechev/revive/lint" @@ -14,25 +13,29 @@ import ( // StructTagRule lints struct tags. type StructTagRule struct { userDefined map[string][]string // map: key -> []option - - configureOnce sync.Once } -func (r *StructTagRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *StructTagRule) Configure(arguments lint.Arguments) error { if len(arguments) == 0 { - return + return nil } - checkNumberOfArguments(1, arguments, r.Name()) + err := checkNumberOfArguments(1, arguments, r.Name()) + if err != nil { + return err + } r.userDefined = make(map[string][]string, len(arguments)) for _, arg := range arguments { item, ok := arg.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string, got %v (of type %T)", r.Name(), arg, arg)) + return fmt.Errorf("invalid argument to the %s rule. Expecting a string, got %v (of type %T)", r.Name(), arg, arg) } parts := strings.Split(item, ",") if len(parts) < 2 { - panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string of the form key[,option]+, got %s", r.Name(), item)) + return fmt.Errorf("invalid argument to the %s rule. 
Expecting a string of the form key[,option]+, got %s", r.Name(), item) } key := strings.TrimSpace(parts[0]) for i := 1; i < len(parts); i++ { @@ -40,20 +43,20 @@ func (r *StructTagRule) configure(arguments lint.Arguments) { r.userDefined[key] = append(r.userDefined[key], option) } } + return nil } // Apply applies the rule to given file. -func (r *StructTagRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } w := lintStructTagRule{ - onFailure: onFailure, - userDefined: r.userDefined, + onFailure: onFailure, + userDefined: r.userDefined, + isAtLeastGo124: file.Pkg.IsAtLeastGo124(), } ast.Walk(w, file.AST) @@ -67,10 +70,11 @@ func (*StructTagRule) Name() string { } type lintStructTagRule struct { - onFailure func(lint.Failure) - userDefined map[string][]string // map: key -> []option - usedTagNbr map[int]bool // list of used tag numbers - usedTagName map[string]bool // list of used tag keys + onFailure func(lint.Failure) + userDefined map[string][]string // map: key -> []option + usedTagNbr map[int]bool // list of used tag numbers + usedTagName map[string]bool // list of used tag keys + isAtLeastGo124 bool } func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { @@ -93,14 +97,16 @@ func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { return w } -const keyASN1 = "asn1" -const keyBSON = "bson" -const keyDefault = "default" -const keyJSON = "json" -const keyProtobuf = "protobuf" -const keyRequired = "required" -const keyXML = "xml" -const keyYAML = "yaml" +const ( + keyASN1 = "asn1" + keyBSON = "bson" + keyDefault = "default" + keyJSON = "json" + keyProtobuf = "protobuf" + keyRequired = "required" + keyXML = "xml" + keyYAML = "yaml" +) func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) { isUnnamedTag := tag.Name == "" || tag.Name == "-" @@ -108,13 +114,9 @@ func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) return "", true } - needsToCheckTagName := tag.Key == keyBSON || - tag.Key == keyJSON || - tag.Key == keyXML || - tag.Key == keyYAML || - tag.Key == keyProtobuf - - if !needsToCheckTagName { + switch tag.Key { + case keyBSON, keyJSON, keyXML, keyYAML, keyProtobuf: + default: return "", true } @@ -139,8 +141,8 @@ func (lintStructTagRule) getTagName(tag *structtag.Tag) string { switch tag.Key { case keyProtobuf: for _, option := range tag.Options { - if strings.HasPrefix(option, "name=") { - return strings.TrimPrefix(option, "name=") + if tagName, found := strings.CutPrefix(option, "name="); found { + return tagName } } return "" // protobuf tag lacks 'name' option @@ -281,6 +283,11 @@ func (w lintStructTagRule) checkJSONTag(name string, options []string) (string, if name != "-" { return "option can not be empty in JSON tag", false } + case "omitzero": + if w.isAtLeastGo124 { + continue + } + fallthrough default: if w.isUserDefined(keyJSON, opt) { continue diff --git a/vendor/github.com/mgechev/revive/rule/superfluous_else.go b/vendor/github.com/mgechev/revive/rule/superfluous_else.go index 18e8f3bdd..c9474d9c3 100644 --- a/vendor/github.com/mgechev/revive/rule/superfluous_else.go +++ b/vendor/github.com/mgechev/revive/rule/superfluous_else.go @@ -12,7 +12,7 @@ type SuperfluousElseRule struct{} // Apply applies the rule to given file. 
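To illustrate the new `omitzero` handling in the struct-tag rule above (struct and field names here are invented), a minimal sketch of a tag that is accepted when the package targets Go 1.24 or later and otherwise falls through to the unknown-option check, unless it is configured as a user-defined option:

```go
package example

import "time"

// With Go >= 1.24 the struct-tag rule accepts the new encoding/json
// "omitzero" option; on earlier Go versions it is treated as an
// unknown option (unless declared as user-defined).
type Event struct {
	Name      string    `json:"name"`
	StartedAt time.Time `json:"started_at,omitzero"`
}
```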
func (e *SuperfluousElseRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) + return ifelse.Apply(e.checkIfElse, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -20,28 +20,31 @@ func (*SuperfluousElseRule) Name() string { return "superfluous-else" } -// CheckIfElse evaluates the rule against an ifelse.Chain and returns a failure message if applicable. -func (*SuperfluousElseRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) string { +func (*SuperfluousElseRule) checkIfElse(chain ifelse.Chain, args ifelse.Args) (string, bool) { + if !chain.HasElse { + return "", false + } + if !chain.If.Deviates() { // this rule only applies if the if-block deviates control flow - return "" + return "", false } if chain.HasPriorNonDeviating { // if we de-indent the "else" block then a previous branch - // might flow into it, affecting program behaviour - return "" + // might flow into it, affecting program behavior + return "", false } if chain.If.Returns() { // avoid overlapping with indent-error-flow - return "" + return "", false } - if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls()) { // avoid increasing variable scope - return "" + return "", false } - return fmt.Sprintf("if block ends with %v, so drop this else and outdent its block", chain.If.LongString()) + return fmt.Sprintf("if block ends with %v, so drop this else and outdent its block", chain.If.LongString()), true } diff --git a/vendor/github.com/mgechev/revive/rule/time_equal.go b/vendor/github.com/mgechev/revive/rule/time_equal.go index a4fab88b3..769b15406 100644 --- a/vendor/github.com/mgechev/revive/rule/time_equal.go +++ b/vendor/github.com/mgechev/revive/rule/time_equal.go @@ -63,7 +63,7 @@ func (l *lintTimeEqual) Visit(node ast.Node) ast.Visitor { } l.onFailure(lint.Failure{ - Category: "time", + Category: lint.FailureCategoryTime, Confidence: 1, Node: node, Failure: fmt.Sprintf("use %s%s.Equal(%s) instead of %q operator", negateStr, gofmt(expr.X), gofmt(expr.Y), expr.Op), diff --git a/vendor/github.com/mgechev/revive/rule/time_naming.go b/vendor/github.com/mgechev/revive/rule/time_naming.go index 5bccf8a7a..5bbe8aa06 100644 --- a/vendor/github.com/mgechev/revive/rule/time_naming.go +++ b/vendor/github.com/mgechev/revive/rule/time_naming.go @@ -9,7 +9,7 @@ import ( "github.com/mgechev/revive/lint" ) -// TimeNamingRule lints given else constructs. +// TimeNamingRule lints the name of a time variable. type TimeNamingRule struct{} // Apply applies the rule to given file. 
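A small, hypothetical example of what the reworked superfluous-else check above reports: the if block ends with a control-flow deviation other than return (here a continue), so the else can be dropped and its block outdented.

```go
package example

func sumPositives(values []int) int {
	total := 0
	for _, v := range values {
		if v <= 0 {
			continue
		} else { // reported: the if block ends with a continue, so drop this else and outdent its block
			total += v
		}
	}
	return total
}
```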
@@ -64,7 +64,7 @@ func (w *lintTimeNames) Visit(node ast.Node) ast.Visitor { continue } w.onFailure(lint.Failure{ - Category: "time", + Category: lint.FailureCategoryTime, Confidence: 0.9, Node: v, Failure: fmt.Sprintf("var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix), diff --git a/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go b/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go index 34d854e8f..a272724dd 100644 --- a/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go +++ b/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go @@ -1,9 +1,9 @@ package rule import ( + "errors" "fmt" "go/ast" - "sync" "github.com/mgechev/revive/lint" ) @@ -16,18 +16,19 @@ const ( // UncheckedTypeAssertionRule lints missing or ignored `ok`-value in dynamic type casts. type UncheckedTypeAssertionRule struct { acceptIgnoredAssertionResult bool - - configureOnce sync.Once } -func (r *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *UncheckedTypeAssertionRule) Configure(arguments lint.Arguments) error { if len(arguments) == 0 { - return + return nil } args, ok := arguments[0].(map[string]any) if !ok { - panic("Unable to get arguments. Expected object of key-value-pairs.") + return errors.New("unable to get arguments. Expected object of key-value-pairs") } for k, v := range args { @@ -35,18 +36,17 @@ func (r *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { case "acceptIgnoredAssertionResult": r.acceptIgnoredAssertionResult, ok = v.(bool) if !ok { - panic(fmt.Sprintf("Unable to parse argument '%s'. Expected boolean.", k)) + return fmt.Errorf("unable to parse argument '%s'. Expected boolean", k) } default: - panic(fmt.Sprintf("Unknown argument: %s", k)) + return fmt.Errorf("unknown argument: %s", k) } } + return nil } // Apply applies the rule to given file. -func (r *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *UncheckedTypeAssertionRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure walker := &lintUncheckedTypeAssertion{ @@ -181,7 +181,7 @@ func (w *lintUncheckedTypeAssertion) Visit(node ast.Node) ast.Visitor { func (w *lintUncheckedTypeAssertion) addFailure(n *ast.TypeAssertExpr, why string) { s := fmt.Sprintf("type cast result is unchecked in %v - %s", gofmt(n), why) w.onFailure(lint.Failure{ - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Confidence: 1, Node: n, Failure: s, diff --git a/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go b/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go index d806b6757..b59275d89 100644 --- a/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go +++ b/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go @@ -6,7 +6,7 @@ import ( "github.com/mgechev/revive/lint" ) -// UnconditionalRecursionRule lints given else constructs. +// UnconditionalRecursionRule warns on function calls that will lead to infinite recursion. type UnconditionalRecursionRule struct{} // Apply applies the rule to given file. 
@@ -17,8 +17,35 @@ func (*UnconditionalRecursionRule) Apply(file *lint.File, _ lint.Arguments) []li failures = append(failures, failure) } - w := lintUnconditionalRecursionRule{onFailure: onFailure} - ast.Walk(w, file.AST) + // Range over global declarations of the file to detect func/method declarations and analyze them + for _, decl := range file.AST.Decls { + n, ok := decl.(*ast.FuncDecl) + if !ok { + continue // not a func/method declaration + } + + if n.Body == nil { + continue // func/method with empty body => it can not be recursive + } + + var rec *ast.Ident + switch { + case n.Recv == nil: + rec = nil + case n.Recv.NumFields() < 1 || len(n.Recv.List[0].Names) < 1: + rec = &ast.Ident{Name: "_"} + default: + rec = n.Recv.List[0].Names[0] + } + + w := &lintUnconditionalRecursionRule{ + onFailure: onFailure, + currentFunc: &funcStatus{&funcDesc{rec, n.Name}, false}, + } + + ast.Walk(w, n.Body) + } + return failures } @@ -50,26 +77,14 @@ type lintUnconditionalRecursionRule struct { inGoStatement bool } -// Visit will traverse the file AST. -// The rule is based in the following algorithm: inside each function body we search for calls to the function itself. +// Visit will traverse function's body we search for calls to the function itself. // We do not search inside conditional control structures (if, for, switch, ...) because any recursive call inside them is conditioned // We do search inside conditional control structures are statements that will take the control out of the function (return, exit, panic) // If we find conditional control exits, it means the function is NOT unconditionally-recursive // If we find a recursive call before finding any conditional exit, a failure is generated -// In resume: if we found a recursive call control-dependant from the entry point of the function then we raise a failure. -func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { +// In resume: if we found a recursive call control-dependent from the entry point of the function then we raise a failure. 
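A short, invented illustration of the algorithm described in the comment above: the first function recurses with no conditional exit before the call and is reported, while the second is accepted because a conditional exit is seen first.

```go
package example

import "fmt"

// Reported as "unconditional recursive call": the recursive call is not
// guarded by any conditional exit, so every invocation recurses again.
func countDown(n int) {
	fmt.Println(n)
	countDown(n - 1)
}

// Not reported: a conditional exit (the return inside the if) is seen
// before the recursive call is reached.
func countDownSafe(n int) {
	if n <= 0 {
		return
	}
	fmt.Println(n)
	countDownSafe(n - 1)
}
```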
+func (w *lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { switch n := node.(type) { - case *ast.FuncDecl: - var rec *ast.Ident - switch { - case n.Recv == nil: - rec = nil - case n.Recv.NumFields() < 1 || len(n.Recv.List[0].Names) < 1: - rec = &ast.Ident{Name: "_"} - default: - rec = n.Recv.List[0].Names[0] - } - w.currentFunc = &funcStatus{&funcDesc{rec, n.Name}, false} case *ast.CallExpr: // check if call arguments has a recursive call for _, arg := range n.Args { @@ -100,7 +115,7 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { !w.currentFunc.seenConditionalExit && // there is a conditional exit in the function w.currentFunc.funcDesc.equal(&funcDesc{selector, funcID}) { w.onFailure(lint.Failure{ - Category: "logic", + Category: lint.FailureCategoryLogic, Confidence: 0.8, Node: n, Failure: "unconditional recursive call", @@ -152,20 +167,7 @@ func (w *lintUnconditionalRecursionRule) updateFuncStatus(node ast.Node) { w.currentFunc.seenConditionalExit = w.hasControlExit(node) } -var exitFunctions = map[string]map[string]bool{ - "os": {"Exit": true}, - "syscall": {"Exit": true}, - "log": { - "Fatal": true, - "Fatalf": true, - "Fatalln": true, - "Panic": true, - "Panicf": true, - "Panicln": true, - }, -} - -func (lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { +func (*lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { // isExit returns true if the given node makes control exit the function isExit := func(node ast.Node) bool { switch n := node.(type) { @@ -187,8 +189,7 @@ func (lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { functionName := se.Sel.Name pkgName := id.Name - isCallToExitFunction := exitFunctions[pkgName] != nil && exitFunctions[pkgName][functionName] - if isCallToExitFunction { + if isCallToExitFunction(pkgName, functionName) { return true } } diff --git a/vendor/github.com/mgechev/revive/rule/unexported_naming.go b/vendor/github.com/mgechev/revive/rule/unexported_naming.go index 0c2b39d41..ceb096a41 100644 --- a/vendor/github.com/mgechev/revive/rule/unexported_naming.go +++ b/vendor/github.com/mgechev/revive/rule/unexported_naming.go @@ -107,7 +107,7 @@ func (unl unexportablenamingLinter) lintIDs(ids []*ast.Ident) { unl.onFailure(lint.Failure{ Node: id, Confidence: 1, - Category: "naming", + Category: lint.FailureCategoryNaming, Failure: fmt.Sprintf("the symbol %s is local, its name should start with a lowercase letter", id.String()), }) } diff --git a/vendor/github.com/mgechev/revive/rule/unexported_return.go b/vendor/github.com/mgechev/revive/rule/unexported_return.go index 10f8e3fbe..0d88a957c 100644 --- a/vendor/github.com/mgechev/revive/rule/unexported_return.go +++ b/vendor/github.com/mgechev/revive/rule/unexported_return.go @@ -9,24 +9,54 @@ import ( "github.com/mgechev/revive/lint" ) -// UnexportedReturnRule lints given else constructs. +// UnexportedReturnRule warns when a public return is from unexported type. type UnexportedReturnRule struct{} // Apply applies the rule to given file. 
func (*UnexportedReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - fileAst := file.AST - walker := lintUnexportedReturn{ - file: file, - fileAst: fileAst, - onFailure: func(failure lint.Failure) { - failures = append(failures, failure) - }, - } + for _, decl := range file.AST.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + + if fn.Type.Results == nil { + continue + } + + if !fn.Name.IsExported() { + continue + } + + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(typeparams.ReceiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + continue + } + } - file.Pkg.TypeCheck() - ast.Walk(walker, fileAst) + for _, ret := range fn.Type.Results.List { + typ := file.Pkg.TypeOf(ret.Type) + if exportedType(typ) { + continue + } + + failures = append(failures, lint.Failure{ + Category: lint.FailureCategoryUnexportedTypeInAPI, + Node: ret.Type, + Confidence: 0.8, + Failure: fmt.Sprintf("exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ), + }) + + break // only flag one + } + } return failures } @@ -36,49 +66,6 @@ func (*UnexportedReturnRule) Name() string { return "unexported-return" } -type lintUnexportedReturn struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) -} - -func (w lintUnexportedReturn) Visit(n ast.Node) ast.Visitor { - fn, ok := n.(*ast.FuncDecl) - if !ok { - return w - } - if fn.Type.Results == nil { - return nil - } - if !fn.Name.IsExported() { - return nil - } - thing := "func" - if fn.Recv != nil && len(fn.Recv.List) > 0 { - thing = "method" - if !ast.IsExported(typeparams.ReceiverType(fn)) { - // Don't report exported methods of unexported types, - // such as private implementations of sort.Interface. - return nil - } - } - for _, ret := range fn.Type.Results.List { - typ := w.file.Pkg.TypeOf(ret.Type) - if exportedType(typ) { - continue - } - w.onFailure(lint.Failure{ - Category: "unexported-type-in-api", - Node: ret.Type, - Confidence: 0.8, - Failure: fmt.Sprintf("exported %s %s returns unexported type %s, which can be annoying to use", - thing, fn.Name.Name, typ), - }) - break // only flag one - } - return nil -} - // exportedType reports whether typ is an exported type. // It is imprecise, and will err on the side of returning true, // such as for composite types. diff --git a/vendor/github.com/mgechev/revive/rule/unhandled_error.go b/vendor/github.com/mgechev/revive/rule/unhandled_error.go index 4fad8ccfc..6ae919bcd 100644 --- a/vendor/github.com/mgechev/revive/rule/unhandled_error.go +++ b/vendor/github.com/mgechev/revive/rule/unhandled_error.go @@ -1,48 +1,48 @@ package rule import ( + "errors" "fmt" "go/ast" "go/types" "regexp" "strings" - "sync" "github.com/mgechev/revive/lint" ) -// UnhandledErrorRule lints given else constructs. +// UnhandledErrorRule warns on unhandled errors returned by function calls. type UnhandledErrorRule struct { ignoreList []*regexp.Regexp - - configureOnce sync.Once } -func (r *UnhandledErrorRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. 
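As a brief, invented example of what the unexported-return rework above flags: an exported constructor returning an unexported type, which callers outside the package cannot name.

```go
package example

type settings struct {
	timeout int
}

// Reported: exported func NewSettings returns an unexported type,
// which can be annoying to use from other packages.
func NewSettings() settings {
	return settings{timeout: 30}
}
```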
+func (r *UnhandledErrorRule) Configure(arguments lint.Arguments) error { for _, arg := range arguments { argStr, ok := arg.(string) if !ok { - panic(fmt.Sprintf("Invalid argument to the unhandled-error rule. Expecting a string, got %T", arg)) + return fmt.Errorf("invalid argument to the unhandled-error rule. Expecting a string, got %T", arg) } argStr = strings.Trim(argStr, " ") if argStr == "" { - panic("Invalid argument to the unhandled-error rule, expected regular expression must not be empty.") + return errors.New("invalid argument to the unhandled-error rule, expected regular expression must not be empty") } exp, err := regexp.Compile(argStr) if err != nil { - panic(fmt.Sprintf("Invalid argument to the unhandled-error rule: regexp %q does not compile: %v", argStr, err)) + return fmt.Errorf("invalid argument to the unhandled-error rule: regexp %q does not compile: %w", argStr, err) } r.ignoreList = append(r.ignoreList, exp) } + return nil } // Apply applies the rule to given file. -func (r *UnhandledErrorRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) - +func (r *UnhandledErrorRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure walker := &lintUnhandledErrors{ @@ -113,7 +113,7 @@ func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { } w.onFailure(lint.Failure{ - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Confidence: 1, Node: n, Failure: fmt.Sprintf("Unhandled error in call to function %v", name), diff --git a/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go b/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go index 8e0784ba4..c9369ca9f 100644 --- a/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go +++ b/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go @@ -101,7 +101,7 @@ func (w lintUnnecessaryStmtRule) newFailure(node ast.Node, msg string) { w.onFailure(lint.Failure{ Confidence: 1, Node: node, - Category: "style", + Category: lint.FailureCategoryStyle, Failure: msg, }) } diff --git a/vendor/github.com/mgechev/revive/rule/unreachable_code.go b/vendor/github.com/mgechev/revive/rule/unreachable_code.go index dcc5b7905..dbc877148 100644 --- a/vendor/github.com/mgechev/revive/rule/unreachable_code.go +++ b/vendor/github.com/mgechev/revive/rule/unreachable_code.go @@ -62,7 +62,6 @@ func (w lintUnreachableCode) Visit(node ast.Node) ast.Visitor { } loop: for i, stmt := range blk.List[:len(blk.List)-1] { - // println("iterating ", len(blk.List)) next := blk.List[i+1] if _, ok := next.(*ast.LabeledStmt); ok { continue // skip if next statement is labeled @@ -116,7 +115,7 @@ func newUnreachableCodeFailure(node ast.Node) lint.Failure { return lint.Failure{ Confidence: 1, Node: node, - Category: "logic", + Category: lint.FailureCategoryLogic, Failure: "unreachable code after this statement", } } diff --git a/vendor/github.com/mgechev/revive/rule/unused_param.go b/vendor/github.com/mgechev/revive/rule/unused_param.go index a8514ac2d..f57c59b06 100644 --- a/vendor/github.com/mgechev/revive/rule/unused_param.go +++ b/vendor/github.com/mgechev/revive/rule/unused_param.go @@ -4,53 +4,53 @@ import ( "fmt" "go/ast" "regexp" - "sync" "github.com/mgechev/revive/lint" ) +var allowBlankIdentifierRegex = regexp.MustCompile("^_$") + // UnusedParamRule lints unused params in functions. 
type UnusedParamRule struct { // regex to check if some name is valid for unused parameter, "^_$" by default allowRegex *regexp.Regexp failureMsg string - - configureOnce sync.Once } -func (r *UnusedParamRule) configure(args lint.Arguments) { - // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives - // it's more compatible to JSON nature of configurations - var allowedRegexStr string +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *UnusedParamRule) Configure(args lint.Arguments) error { + // while by default args is an array, it could be good to provide structures inside it by default, not arrays or primitives + // as it's more compatible to JSON nature of configurations + r.allowRegex = allowBlankIdentifierRegex + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it as _" if len(args) == 0 { - allowedRegexStr = "^_$" - r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it as _" - } else { - // Arguments = [{}] - options := args[0].(map[string]any) - // Arguments = [{allowedRegex="^_"}] - - if allowedRegexParam, ok := options["allowRegex"]; ok { - allowedRegexStr, ok = allowedRegexParam.(string) - if !ok { - panic(fmt.Errorf("error configuring %s rule: allowedRegex is not string but [%T]", r.Name(), allowedRegexParam)) - } - } + return nil + } + // Arguments = [{}] + options := args[0].(map[string]any) + + allowRegexParam, ok := options["allowRegex"] + if !ok { + return nil + } + // Arguments = [{allowRegex="^_"}] + allowRegexStr, ok := allowRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring %s rule: allowRegex is not string but [%T]", r.Name(), allowRegexParam)) } var err error - r.allowRegex, err = regexp.Compile(allowedRegexStr) + r.allowRegex, err = regexp.Compile(allowRegexStr) if err != nil { - panic(fmt.Errorf("error configuring %s rule: allowedRegex is not valid regex [%s]: %v", r.Name(), allowedRegexStr, err)) - } - - if r.failureMsg == "" { - r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it to match " + r.allowRegex.String() + return fmt.Errorf("error configuring %s rule: allowRegex is not valid regex [%s]: %w", r.Name(), allowRegexStr, err) } + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it to match " + r.allowRegex.String() + return nil } // Apply applies the rule to given file. 
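To sketch what the Configure logic above controls (function names invented): with the default allowRegex of `^_$` only the blank identifier is exempt, so an unused named parameter is reported and can be silenced by renaming it to `_`, or to anything matching a custom allowRegex.

```go
package example

// Reported: parameter 'scale' seems to be unused, consider removing or
// renaming it as _.
func sum(a, b, scale int) int {
	return a + b
}

// Not reported: the unused parameter matches the default allowRegex "^_$".
func sumIgnored(a, b, _ int) int {
	return a + b
}
```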
-func (r *UnusedParamRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) +func (r *UnusedParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { @@ -129,7 +129,7 @@ func (w lintUnusedParamRule) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "bad practice", + Category: lint.FailureCategoryBadPractice, Failure: fmt.Sprintf(w.failureMsg, n.Name), }) } @@ -139,6 +139,7 @@ func (w lintUnusedParamRule) Visit(node ast.Node) ast.Visitor { return w // full method body was inspected } +// TODO: ast.Object is deprecated func retrieveNamedParams(params *ast.FieldList) map[*ast.Object]bool { result := map[*ast.Object]bool{} if params.List == nil { diff --git a/vendor/github.com/mgechev/revive/rule/unused_receiver.go b/vendor/github.com/mgechev/revive/rule/unused_receiver.go index 131aae5fb..13ca39dff 100644 --- a/vendor/github.com/mgechev/revive/rule/unused_receiver.go +++ b/vendor/github.com/mgechev/revive/rule/unused_receiver.go @@ -4,122 +4,98 @@ import ( "fmt" "go/ast" "regexp" - "sync" "github.com/mgechev/revive/lint" ) -// UnusedReceiverRule lints unused params in functions. +// UnusedReceiverRule lints unused receivers in functions. type UnusedReceiverRule struct { // regex to check if some name is valid for unused parameter, "^_$" by default allowRegex *regexp.Regexp failureMsg string - - configureOnce sync.Once } -func (r *UnusedReceiverRule) configure(args lint.Arguments) { - // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives - // it's more compatible to JSON nature of configurations - var allowedRegexStr string +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. 
+func (r *UnusedReceiverRule) Configure(args lint.Arguments) error { + // while by default args is an array, it could be good to provide structures inside it by default, not arrays or primitives + // as it's more compatible to JSON nature of configurations + r.allowRegex = allowBlankIdentifierRegex + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it as _" if len(args) == 0 { - allowedRegexStr = "^_$" - r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it as _" - } else { - // Arguments = [{}] - options := args[0].(map[string]any) - // Arguments = [{allowedRegex="^_"}] - - if allowedRegexParam, ok := options["allowRegex"]; ok { - allowedRegexStr, ok = allowedRegexParam.(string) - if !ok { - panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not string but [%T]", allowedRegexParam)) - } - } + return nil + } + // Arguments = [{}] + options := args[0].(map[string]any) + + allowRegexParam, ok := options["allowRegex"] + if !ok { + return nil + } + // Arguments = [{allowRegex="^_"}] + allowRegexStr, ok := allowRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring [unused-receiver] rule: allowRegex is not string but [%T]", allowRegexParam)) } var err error - r.allowRegex, err = regexp.Compile(allowedRegexStr) + r.allowRegex, err = regexp.Compile(allowRegexStr) if err != nil { - panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not valid regex [%s]: %v", allowedRegexStr, err)) - } - if r.failureMsg == "" { - r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it to match " + r.allowRegex.String() + return fmt.Errorf("error configuring [unused-receiver] rule: allowRegex is not valid regex [%s]: %w", allowRegexStr, err) } + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it to match " + r.allowRegex.String() + return nil } // Apply applies the rule to given file. -func (r *UnusedReceiverRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(args) }) +func (r *UnusedReceiverRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintUnusedReceiverRule{ - onFailure: onFailure, - allowRegex: r.allowRegex, - failureMsg: r.failureMsg, - } - - ast.Walk(w, file.AST) - - return failures -} - -// Name returns the rule name. -func (*UnusedReceiverRule) Name() string { - return "unused-receiver" -} - -type lintUnusedReceiverRule struct { - onFailure func(lint.Failure) - allowRegex *regexp.Regexp - failureMsg string -} - -func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.FuncDecl: - if n.Recv == nil { - return nil // skip this func decl, not a method + for _, decl := range file.AST.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + isMethod := ok && funcDecl.Recv != nil + if !isMethod { + continue } - rec := n.Recv.List[0] // safe to access only the first (unique) element of the list + rec := funcDecl.Recv.List[0] // safe to access only the first (unique) element of the list if len(rec.Names) < 1 { - return nil // the receiver is anonymous: func (aType) Foo(...) ... + continue // the receiver is anonymous: func (aType) Foo(...) ... 
} recID := rec.Names[0] if recID.Name == "_" { - return nil // the receiver is already named _ + continue // the receiver is already named _ } - if w.allowRegex != nil && w.allowRegex.FindStringIndex(recID.Name) != nil { - return nil + if r.allowRegex != nil && r.allowRegex.FindStringIndex(recID.Name) != nil { + continue } // inspect the func body looking for references to the receiver id - fselect := func(n ast.Node) bool { + selectReceiverUses := func(n ast.Node) bool { ident, isAnID := n.(*ast.Ident) return isAnID && ident.Obj == recID.Obj } - refs2recID := pick(n.Body, fselect) + receiverUses := pick(funcDecl.Body, selectReceiverUses) - if len(refs2recID) > 0 { - return nil // the receiver is referenced in the func body + if len(receiverUses) > 0 { + continue // the receiver is referenced in the func body } - w.onFailure(lint.Failure{ + failures = append(failures, lint.Failure{ Confidence: 1, Node: recID, - Category: "bad practice", - Failure: fmt.Sprintf(w.failureMsg, recID.Name), + Category: lint.FailureCategoryBadPractice, + Failure: fmt.Sprintf(r.failureMsg, recID.Name), }) - - return nil // full method body already inspected } - return w + return failures +} + +// Name returns the rule name. +func (*UnusedReceiverRule) Name() string { + return "unused-receiver" } diff --git a/vendor/github.com/mgechev/revive/rule/use_any.go b/vendor/github.com/mgechev/revive/rule/use_any.go index 88160c2fa..0ebb4d628 100644 --- a/vendor/github.com/mgechev/revive/rule/use_any.go +++ b/vendor/github.com/mgechev/revive/rule/use_any.go @@ -6,7 +6,7 @@ import ( "github.com/mgechev/revive/lint" ) -// UseAnyRule lints given else constructs. +// UseAnyRule proposes to replace `interface{}` with its alias `any`. type UseAnyRule struct{} // Apply applies the rule to given file. @@ -46,7 +46,7 @@ func (w lintUseAny) Visit(n ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Node: n, Confidence: 1, - Category: "naming", + Category: lint.FailureCategoryNaming, Failure: "since Go 1.18 'interface{}' can be replaced by 'any'", }) diff --git a/vendor/github.com/mgechev/revive/rule/use_errors_new.go b/vendor/github.com/mgechev/revive/rule/use_errors_new.go new file mode 100644 index 000000000..a43505484 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/use_errors_new.go @@ -0,0 +1,60 @@ +package rule + +import ( + "go/ast" + + "github.com/mgechev/revive/lint" +) + +// UseErrorsNewRule spots calls to fmt.Errorf that can be replaced by errors.New. +type UseErrorsNewRule struct{} + +// Apply applies the rule to given file. +func (*UseErrorsNewRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + walker := lintFmtErrorf{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. 
+func (*UseErrorsNewRule) Name() string { + return "use-errors-new" +} + +type lintFmtErrorf struct { + onFailure func(lint.Failure) +} + +func (w lintFmtErrorf) Visit(n ast.Node) ast.Visitor { + funcCall, ok := n.(*ast.CallExpr) + if !ok { + return w // not a function call + } + + isFmtErrorf := isPkgDot(funcCall.Fun, "fmt", "Errorf") + if !isFmtErrorf { + return w // not a call to fmt.Errorf + } + + if len(funcCall.Args) > 1 { + return w // the use of fmt.Errorf is legit + } + + // the call is of the form fmt.Errorf("...") + w.onFailure(lint.Failure{ + Category: lint.FailureCategoryErrors, + Node: n, + Confidence: 1, + Failure: "replace fmt.Errorf by errors.New", + }) + + return w +} diff --git a/vendor/github.com/mgechev/revive/rule/utils.go b/vendor/github.com/mgechev/revive/rule/utils.go index 1267c2d39..5075c7823 100644 --- a/vendor/github.com/mgechev/revive/rule/utils.go +++ b/vendor/github.com/mgechev/revive/rule/utils.go @@ -6,29 +6,23 @@ import ( "go/ast" "go/printer" "go/token" - "go/types" "regexp" - "strings" "github.com/mgechev/revive/lint" ) -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. -func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } - -var commonMethods = map[string]bool{ - "Error": true, - "Read": true, - "ServeHTTP": true, - "String": true, - "Write": true, - "Unwrap": true, -} - -var knownNameExceptions = map[string]bool{ - "LastInsertId": true, // must match database/sql - "kWh": true, +// exitFunctions is a map of std packages and functions that are considered as exit functions. +var exitFunctions = map[string]map[string]bool{ + "os": {"Exit": true}, + "syscall": {"Exit": true}, + "log": { + "Fatal": true, + "Fatalf": true, + "Fatalln": true, + "Panic": true, + "Panicf": true, + "Panicln": true, + }, } func isCgoExported(f *ast.FuncDecl) bool { @@ -45,34 +39,11 @@ func isCgoExported(f *ast.FuncDecl) bool { return false } -var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) - func isIdent(expr ast.Expr, ident string) bool { id, ok := expr.(*ast.Ident) return ok && id.Name == ident } -var zeroLiteral = map[string]bool{ - "false": true, // bool - // runes - `'\x00'`: true, - `'\000'`: true, - // strings - `""`: true, - "``": true, - // numerics - "0": true, - "0.": true, - "0.0": true, - "0i": true, -} - -func validType(t types.Type) bool { - return t != nil && - t != types.Typ[types.Invalid] && - !strings.Contains(t.String(), "invalid type") // good but not foolproof -} - // isPkgDot checks if the expression is . func isPkgDot(expr ast.Expr, pkg, name string) bool { sel, ok := expr.(*ast.SelectorExpr) @@ -125,32 +96,6 @@ func (p picker) Visit(node ast.Node) ast.Visitor { return p } -// isBoolOp returns true if the given token corresponds to -// a bool operator -func isBoolOp(t token.Token) bool { - switch t { - case token.LAND, token.LOR, token.EQL, token.NEQ: - return true - } - - return false -} - -const ( - trueName = "true" - falseName = "false" -) - -func isExprABooleanLit(n ast.Node) (lexeme string, ok bool) { - oper, ok := n.(*ast.Ident) - - if !ok { - return "", false - } - - return oper.Name, (oper.Name == trueName || oper.Name == falseName) -} - // gofmt returns a string representation of an AST subtree. 
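A brief, invented illustration of the new use-errors-new rule defined above: a fmt.Errorf call with a single constant argument is reported, while a call that actually formats values is left alone.

```go
package example

import (
	"errors"
	"fmt"
)

// Reported: replace fmt.Errorf by errors.New.
func openFailed() error {
	return fmt.Errorf("open failed")
}

// The suggested replacement.
func openFailedFixed() error {
	return errors.New("open failed")
}

// Not reported: fmt.Errorf is legitimate when it formats arguments.
func openFailedAt(path string) error {
	return fmt.Errorf("open failed: %s", path)
}
```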
func gofmt(x any) string { buf := bytes.Buffer{} @@ -160,10 +105,11 @@ func gofmt(x any) string { } // checkNumberOfArguments fails if the given number of arguments is not, at least, the expected one -func checkNumberOfArguments(expected int, args lint.Arguments, ruleName string) { +func checkNumberOfArguments(expected int, args lint.Arguments, ruleName string) error { if len(args) < expected { - panic(fmt.Sprintf("not enough arguments for %s rule, expected %d, got %d. Please check the rule's documentation", ruleName, expected, len(args))) + return fmt.Errorf("not enough arguments for %s rule, expected %d, got %d. Please check the rule's documentation", ruleName, expected, len(args)) } + return nil } var directiveCommentRE = regexp.MustCompile("^//(line |extern |export |[a-z0-9]+:[a-z0-9])") // see https://go-review.googlesource.com/c/website/+/442516/1..2/_content/doc/comment.md#494 @@ -171,3 +117,13 @@ var directiveCommentRE = regexp.MustCompile("^//(line |extern |export |[a-z0-9]+ func isDirectiveComment(line string) bool { return directiveCommentRE.MatchString(line) } + +// isCallToExitFunction checks if the function call is a call to an exit function. +func isCallToExitFunction(pkgName, functionName string) bool { + return exitFunctions[pkgName] != nil && exitFunctions[pkgName][functionName] +} + +// newInternalFailureError returns a slice of Failure with a single internal failure in it +func newInternalFailureError(e error) []lint.Failure { + return []lint.Failure{lint.NewInternalFailure(e.Error())} +} diff --git a/vendor/github.com/mgechev/revive/rule/var_declarations.go b/vendor/github.com/mgechev/revive/rule/var_declarations.go index 3f9d7068a..8119fc8d4 100644 --- a/vendor/github.com/mgechev/revive/rule/var_declarations.go +++ b/vendor/github.com/mgechev/revive/rule/var_declarations.go @@ -5,11 +5,27 @@ import ( "go/ast" "go/token" "go/types" + "strings" "github.com/mgechev/revive/lint" ) -// VarDeclarationsRule lints given else constructs. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// VarDeclarationsRule reduces redundancies around variable declaration. type VarDeclarationsRule struct{} // Apply applies the rule to given file. @@ -69,7 +85,7 @@ func (w *lintVarDeclarations) Visit(node ast.Node) ast.Visitor { // If the RHS is a isZero value, suggest dropping it. 
isZero := false if lit, ok := rhs.(*ast.BasicLit); ok { - isZero = zeroLiteral[lit.Value] + isZero = isZeroValue(lit.Value, v.Type) } else if isIdent(rhs, "nil") { isZero = true } @@ -77,7 +93,7 @@ func (w *lintVarDeclarations) Visit(node ast.Node) ast.Visitor { w.onFailure(lint.Failure{ Confidence: 0.9, Node: rhs, - Category: "zero-value", + Category: lint.FailureCategoryZeroValue, Failure: fmt.Sprintf("should drop = %s from declaration of var %s; it is the zero value", w.file.Render(rhs), v.Names[0]), }) return nil @@ -111,7 +127,7 @@ func (w *lintVarDeclarations) Visit(node ast.Node) ast.Visitor { } w.onFailure(lint.Failure{ - Category: "type-inference", + Category: lint.FailureCategoryTypeInference, Confidence: 0.8, Node: v.Type, Failure: fmt.Sprintf("should omit type %s from declaration of var %s; it will be inferred from the right-hand side", w.file.Render(v.Type), v.Names[0]), @@ -120,3 +136,22 @@ func (w *lintVarDeclarations) Visit(node ast.Node) ast.Visitor { } return w } + +func validType(t types.Type) bool { + return t != nil && + t != types.Typ[types.Invalid] && + !strings.Contains(t.String(), "invalid type") // good but not foolproof +} + +func isZeroValue(litValue string, typ ast.Expr) bool { + switch val := typ.(type) { + case *ast.Ident: + if val.Name == "any" { + return litValue == "nil" + } + case *ast.InterfaceType: + return litValue == "nil" + } + + return zeroLiteral[litValue] +} diff --git a/vendor/github.com/mgechev/revive/rule/var_naming.go b/vendor/github.com/mgechev/revive/rule/var_naming.go index 2c2198dbd..bffcbb276 100644 --- a/vendor/github.com/mgechev/revive/rule/var_naming.go +++ b/vendor/github.com/mgechev/revive/rule/var_naming.go @@ -6,33 +6,48 @@ import ( "go/token" "regexp" "strings" - "sync" "github.com/mgechev/revive/lint" ) var anyCapsRE = regexp.MustCompile(`[A-Z]`) +var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + // regexp for constant names like `SOME_CONST`, `SOME_CONST_2`, `X123_3`, `_SOME_PRIVATE_CONST` (#851, #865) var upperCaseConstRE = regexp.MustCompile(`^_?[A-Z][A-Z\d]*(_[A-Z\d]+)*$`) -// VarNamingRule lints given else constructs. +var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +// VarNamingRule lints the name of a variable. type VarNamingRule struct { allowList []string blockList []string allowUpperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants skipPackageNameChecks bool - - configureOnce sync.Once } -func (r *VarNamingRule) configure(arguments lint.Arguments) { +// Configure validates the rule configuration, and configures the rule accordingly. +// +// Configuration implements the [lint.ConfigurableRule] interface. +func (r *VarNamingRule) Configure(arguments lint.Arguments) error { if len(arguments) >= 1 { - r.allowList = getList(arguments[0], "allowlist") + list, err := getList(arguments[0], "allowlist") + if err != nil { + return err + } + r.allowList = list } if len(arguments) >= 2 { - r.blockList = getList(arguments[1], "blocklist") + list, err := getList(arguments[1], "blocklist") + if err != nil { + return err + } + r.blockList = list } if len(arguments) >= 3 { @@ -40,28 +55,29 @@ func (r *VarNamingRule) configure(arguments lint.Arguments) { thirdArgument := arguments[2] asSlice, ok := thirdArgument.([]any) if !ok { - panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, got %T", "options", arguments[2])) + return fmt.Errorf("invalid third argument to the var-naming rule. 
Expecting a %s of type slice, got %T", "options", arguments[2]) } if len(asSlice) != 1 { - panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, but %d", "options", len(asSlice))) + return fmt.Errorf("invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, but %d", "options", len(asSlice)) } args, ok := asSlice[0].(map[string]any) if !ok { - panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, with map, but %T", "options", asSlice[0])) + return fmt.Errorf("invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, with map, but %T", "options", asSlice[0]) } r.allowUpperCaseConst = fmt.Sprint(args["upperCaseConst"]) == "true" r.skipPackageNameChecks = fmt.Sprint(args["skipPackageNameChecks"]) == "true" } + return nil } -func (r *VarNamingRule) applyPackageCheckRules(walker *lintNames) { +func (*VarNamingRule) applyPackageCheckRules(walker *lintNames) { // Package names need slightly different handling than other names. if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { walker.onFailure(lint.Failure{ Failure: "don't use an underscore in package name", Confidence: 1, Node: walker.fileAst.Name, - Category: "naming", + Category: lint.FailureCategoryNaming, }) } if anyCapsRE.MatchString(walker.fileAst.Name.Name) { @@ -69,15 +85,13 @@ func (r *VarNamingRule) applyPackageCheckRules(walker *lintNames) { Failure: fmt.Sprintf("don't use MixedCaps in package name; %s should be %s", walker.fileAst.Name.Name, strings.ToLower(walker.fileAst.Name.Name)), Confidence: 1, Node: walker.fileAst.Name, - Category: "naming", + Category: lint.FailureCategoryNaming, }) } } // Apply applies the rule to given file. -func (r *VarNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configureOnce.Do(func() { r.configure(arguments) }) - +func (r *VarNamingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure fileAst := file.AST @@ -138,7 +152,7 @@ func (w *lintNames) check(id *ast.Ident, thing string) { Failure: "don't use ALL_CAPS in Go names; use CamelCase", Confidence: 0.8, Node: id, - Category: "naming", + Category: lint.FailureCategoryNaming, }) return } @@ -153,7 +167,7 @@ func (w *lintNames) check(id *ast.Ident, thing string) { Failure: fmt.Sprintf("don't use underscores in Go names; %s %s should be %s", thing, id.Name, should), Confidence: 0.9, Node: id, - Category: "naming", + Category: lint.FailureCategoryNaming, }) return } @@ -161,7 +175,7 @@ func (w *lintNames) check(id *ast.Ident, thing string) { Failure: fmt.Sprintf("%s %s should be %s", thing, id.Name, should), Confidence: 0.8, Node: id, - Category: "naming", + Category: lint.FailureCategoryNaming, }) } @@ -256,18 +270,18 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { return w } -func getList(arg any, argName string) []string { +func getList(arg any, argName string) ([]string, error) { args, ok := arg.([]any) if !ok { - panic(fmt.Sprintf("Invalid argument to the var-naming rule. Expecting a %s of type slice with initialisms, got %T", argName, arg)) + return nil, fmt.Errorf("invalid argument to the var-naming rule. Expecting a %s of type slice with initialisms, got %T", argName, arg) } var list []string for _, v := range args { val, ok := v.(string) if !ok { - panic(fmt.Sprintf("Invalid %s values of the var-naming rule. 
Expecting slice of strings but got element of type %T", val, arg)) + return nil, fmt.Errorf("invalid %s values of the var-naming rule. Expecting slice of strings but got element of type %T", val, arg) } list = append(list, val) } - return list + return list, nil } diff --git a/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go b/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go index a2d304ae5..1b8c2756c 100644 --- a/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go +++ b/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go @@ -38,7 +38,7 @@ func (w lintWaitGroupByValueRule) Visit(node ast.Node) ast.Visitor { return w } - // Check all function's parameters + // Check all function parameters for _, field := range fd.Type.Params.List { if !w.isWaitGroup(field.Type) { continue diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c75823490..000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. 
-* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. 
-The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
-func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af..000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac3..000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. 
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). 
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
-func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. 
- if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then its not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly. - if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set te final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. 
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot 
parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go index ee4e7bb6e..5646309e0 100644 --- a/vendor/github.com/modern-go/reflect2/safe_type.go +++ b/vendor/github.com/modern-go/reflect2/safe_type.go @@ -6,10 +6,12 @@ import ( ) type safeType struct { - reflect.Type - cfg *frozenConfig + Type reflect.Type + cfg *frozenConfig } +var _ Type = &safeType{} + func (type2 *safeType) 
New() interface{} { return reflect.New(type2.Type).Interface() } @@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer { panic("does not support unsafe operation") } +func (type2 *safeType) Kind() reflect.Kind { + return type2.Type.Kind() +} + +func (type2 *safeType) Len() int { + return type2.Type.Len() +} + +func (type2 *safeType) NumField() int { + return type2.Type.NumField() +} + +func (type2 *safeType) String() string { + return type2.Type.String() +} + func (type2 *safeType) Elem() Type { return type2.cfg.Type2(type2.Type.Elem()) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/README.md b/vendor/github.com/nunnatsa/ginkgolinter/README.md index 012628ed7..83c436359 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/README.md +++ b/vendor/github.com/nunnatsa/ginkgolinter/README.md @@ -180,7 +180,7 @@ var _ = Describe("checking something", Focus, func() { These container, or the `Focus` spec, must not be part of the final source code, and should only be used locally by the developer. -***This rule is disabled by default***. Use the `--forbid-focus-container=true` command line flag to enable it. +***This rule is disabled by default***. Use the `--forbid-focus-container` command line flag to enable it. ### Comparing values from different types [BUG] @@ -189,7 +189,7 @@ The `Equal` and the `BeIdentical` matchers also check the type, not only the val The following code will fail in runtime: ```go x := 5 // x is int -Expect(x).Should(Eqaul(uint(5)) // x and uint(5) are with different +Expect(x).Should(Equal(uint(5)) // x and uint(5) are with different ``` When using negative checks, it's even worse, because we get a false positive: ``` @@ -202,7 +202,7 @@ using casting, or use the `BeEquivalentTo` matcher. The linter can't guess what is the best solution in each case, and so it won't auto-fix this warning. -To suppress this warning entirely, use the `--suppress-type-compare-assertion=true` command line parameter. +To suppress this warning entirely, use the `--suppress-type-compare-assertion` command line parameter. To suppress a specific file or line, use the `// ginkgo-linter:ignore-type-compare-warning` comment (see [below](#suppress-warning-from-the-code)) @@ -234,7 +234,7 @@ flag **is** set. ***Note***: This rule work with best-effort approach. It can't find many cases, like const defined not in the same package, or when using variables. -The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or +The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling). This rule checks if the async (`Eventually` or `Consistently`) timeout duration, is not shorter than the polling interval. @@ -274,7 +274,7 @@ a Gomega object as their first parameter, and returns nothing, e.g. this is a va ***Note***: This rule **does not** support auto-fix. ### Avoid Spec Pollution: Don't Initialize Variables in Container Nodes [BUG/STYLE]: -***Note***: Only applied when the `--forbid-spec-pollution=true` flag is set (disabled by default). +***Note***: Only applied when the `--forbid-spec-pollution` flag is set (disabled by default). 
According to [ginkgo documentation](https://onsi.github.io/ginkgo/#avoid-spec-pollution-dont-initialize-variables-in-container-nodes), no variable should be assigned within a container node (`Describe`, `Context`, `When` or their `F`, `P` or `X` forms) @@ -451,13 +451,13 @@ Expect("abc").ShouldNot(BeEmpty()) // => Expect("abc").ToNot(BeEmpty()) ``` This rule support auto fixing. -***This rule is disabled by default***. Use the `--force-expect-to=true` command line flag to enable it. +***This rule is disabled by default***. Use the `--force-expect-to` command line flag to enable it. ### Async timing interval: multiple timeout or polling intervals [STYLE] ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set. -The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or +The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling). The linter checks that there is up to one polling argument and up to one timeout argument. @@ -475,7 +475,7 @@ Eventually(aFunc, time.Second*10, time.Millisecond * 500).WithPolling(time.Milli ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set. -gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly): +gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): * a `time.Duration` value * any kind of numeric value (int(8/16/32/64), uint(8/16/32/64) or float(32/64), as the number of seconds. * duration string like `"12s"` @@ -522,20 +522,20 @@ will trigger a warning with a suggestion to replace the mather to ```go Expect(myErrorFunc()).To(Succeed()) ``` -***This rule is disabled by default***. Use the `--force-succeed=true` command line flag to enable it. +***This rule is disabled by default***. Use the `--force-succeed` command line flag to enable it. ***Note***: This rule **does** support auto-fix, when the `--fix` command line parameter is used. 
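For illustration, a test that satisfies the type-comparison rule described earlier in this README could look roughly like the sketch below, either by casting the actual value or by using the `BeEquivalentTo` matcher; the test name and values here are hypothetical and not taken from the linter's own test suite:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Illustrative sketch: two ways to avoid the "comparing values from
// different types" warning when the actual value is an int.
func TestTypeCompareFixes(t *testing.T) {
	g := NewWithT(t)
	x := 5 // x is an int

	// Option 1: cast the actual value so both sides share a type.
	g.Expect(uint(x)).To(Equal(uint(5)))

	// Option 2: BeEquivalentTo converts the actual value to the expected
	// value's type before comparing, so the mixed types still match.
	g.Expect(x).To(BeEquivalentTo(uint(5)))
}
```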
## Suppress the linter ### Suppress warning from command line -* Use the `--suppress-len-assertion=true` flag to suppress the wrong length and cap assertions warning -* Use the `--suppress-nil-assertion=true` flag to suppress the wrong nil assertion warning -* Use the `--suppress-err-assertion=true` flag to suppress the wrong error assertion warning -* Use the `--suppress-compare-assertion=true` flag to suppress the wrong comparison assertion warning -* Use the `--suppress-async-assertion=true` flag to suppress the function call in async assertion warning -* Use the `--forbid-focus-container=true` flag to activate the focused container assertion (deactivated by default) -* Use the `--suppress-type-compare-assertion=true` to suppress the type compare assertion warning -* Use the `--allow-havelen-0=true` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from +* Use the `--suppress-len-assertion` flag to suppress the wrong length and cap assertions warning +* Use the `--suppress-nil-assertion` flag to suppress the wrong nil assertion warning +* Use the `--suppress-err-assertion` flag to suppress the wrong error assertion warning +* Use the `--suppress-compare-assertion` flag to suppress the wrong comparison assertion warning +* Use the `--suppress-async-assertion` flag to suppress the function call in async assertion warning +* Use the `--forbid-focus-container` flag to activate the focused container assertion (deactivated by default) +* Use the `--suppress-type-compare-assertion` to suppress the type compare assertion warning +* Use the `--allow-havelen-0` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from command line, and not from a comment. ### Suppress warning from the code @@ -559,7 +559,7 @@ To suppress the wrong async assertion warning, add a comment with (only) `ginkgo-linter:ignore-async-assert-warning`. -To supress the focus container warning, add a comment with (only) +To suppress the focus container warning, add a comment with (only) `ginkgo-linter:ignore-focus-container-warning` @@ -572,10 +572,10 @@ Notice that this comment will not work for an anonymous variable container like // ginkgo-linter:ignore-focus-container-warning (not working!!) var _ = FDescribe(...) ``` -In this case, use the file comment (see bellow). +In this case, use the file comment (see below). There are two options to use these comments: -1. If the comment is at the top of the file, supress the warning for the whole file; e.g.: +1. 
If the comment is at the top of the file, suppress the warning for the whole file; e.g.: ```go package mypackage diff --git a/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go index dbc39aba5..ac762cd9b 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go @@ -37,21 +37,19 @@ func NewAnalyzer() *analysis.Analyzer { a := NewAnalyzerWithConfig(config) - var ignored bool a.Flags.Init("ginkgolinter", flag.ExitOnError) - a.Flags.Var(&config.SuppressLen, "suppress-len-assertion", "Suppress warning for wrong length assertions") - a.Flags.Var(&config.SuppressNil, "suppress-nil-assertion", "Suppress warning for wrong nil assertions") - a.Flags.Var(&config.SuppressErr, "suppress-err-assertion", "Suppress warning for wrong error assertions") - a.Flags.Var(&config.SuppressCompare, "suppress-compare-assertion", "Suppress warning for wrong comparison assertions") - a.Flags.Var(&config.SuppressAsync, "suppress-async-assertion", "Suppress warning for function call in async assertion, like Eventually") - a.Flags.Var(&config.ValidateAsyncIntervals, "validate-async-intervals", "best effort validation of async intervals (timeout and polling); ignored the suppress-async-assertion flag is true") - a.Flags.Var(&config.SuppressTypeCompare, "suppress-type-compare-assertion", "Suppress warning for comparing values from different types, like int32 and uint32") - a.Flags.Var(&config.AllowHaveLen0, "allow-havelen-0", "Do not warn for HaveLen(0); default = false") - a.Flags.Var(&config.ForceExpectTo, "force-expect-to", "force using `Expect` with `To`, `ToNot` or `NotTo`. reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)") - a.Flags.BoolVar(&ignored, "suppress-focus-container", true, "Suppress warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt. 
Deprecated and ignored: use --forbid-focus-container instead") - a.Flags.Var(&config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") - a.Flags.Var(&config.ForbidSpecPollution, "forbid-spec-pollution", "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") - a.Flags.Var(&config.ForceSucceedForFuncs, "force-succeed", "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") + a.Flags.BoolVar(&config.SuppressLen, "suppress-len-assertion", config.SuppressLen, "Suppress warning for wrong length assertions") + a.Flags.BoolVar(&config.SuppressNil, "suppress-nil-assertion", config.SuppressNil, "Suppress warning for wrong nil assertions") + a.Flags.BoolVar(&config.SuppressErr, "suppress-err-assertion", config.SuppressErr, "Suppress warning for wrong error assertions") + a.Flags.BoolVar(&config.SuppressCompare, "suppress-compare-assertion", config.SuppressCompare, "Suppress warning for wrong comparison assertions") + a.Flags.BoolVar(&config.SuppressAsync, "suppress-async-assertion", config.SuppressAsync, "Suppress warning for function call in async assertion, like Eventually") + a.Flags.BoolVar(&config.ValidateAsyncIntervals, "validate-async-intervals", config.ValidateAsyncIntervals, "best effort validation of async intervals (timeout and polling); ignored the suppress-async-assertion flag is true") + a.Flags.BoolVar(&config.SuppressTypeCompare, "suppress-type-compare-assertion", config.SuppressTypeCompare, "Suppress warning for comparing values from different types, like int32 and uint32") + a.Flags.BoolVar(&config.AllowHaveLen0, "allow-havelen-0", config.AllowHaveLen0, "Do not warn for HaveLen(0); default = false") + a.Flags.BoolVar(&config.ForceExpectTo, "force-expect-to", config.ForceExpectTo, "force using `Expect` with `To`, `ToNot` or `NotTo`. 
reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)") + a.Flags.BoolVar(&config.ForbidFocus, "forbid-focus-container", config.ForbidFocus, "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") + a.Flags.BoolVar(&config.ForbidSpecPollution, "forbid-spec-pollution", config.ForbidSpecPollution, "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") + a.Flags.BoolVar(&config.ForceSucceedForFuncs, "force-succeed", config.ForceSucceedForFuncs, "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") return a } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/doc.go b/vendor/github.com/nunnatsa/ginkgolinter/doc.go index c07b6a316..2a935e9b3 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/doc.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/doc.go @@ -94,7 +94,7 @@ For example: Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).ProbeEvery(time.Millisecond * 500).Should(BeTrue()) * async timing interval: non-time.Duration intervals [Style] -gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly): +gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): * time.Duration * any kind of numeric value, as number of seconds * duration string like "12s" diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go index c289b24de..5bd6dd6e7 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go @@ -21,13 +21,8 @@ type Actual struct { actualOffset int } -func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string, errMethodExists bool) (*Actual, bool) { - funcName, ok := handler.GetActualFuncName(orig) - if !ok { - return nil, false - } - - arg, actualOffset := getActualArgPayload(orig, clone, pass, funcName, errMethodExists) +func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, timePkg string, info *gomegahandler.GomegaBasicInfo) (*Actual, bool) { + arg, actualOffset := getActualArgPayload(orig, clone, pass, info) if arg == nil { return nil, false } @@ -45,7 +40,7 @@ func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallE isTuple = tpl.Len() > 1 } - isAsyncExpr := gomegainfo.IsAsyncActualMethod(funcName) + isAsyncExpr := gomegainfo.IsAsyncActualMethod(info.MethodName) var asyncArg *AsyncArg if isAsyncExpr { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go index 541a22330..7ba83c586 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go @@ -8,6 +8,8 @@ import ( "golang.org/x/tools/go/analysis" "github.com/nunnatsa/ginkgolinter/internal/expression/value" + "github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo" + 
"github.com/nunnatsa/ginkgolinter/internal/gomegahandler" "github.com/nunnatsa/ginkgolinter/internal/gomegainfo" "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" ) @@ -40,15 +42,15 @@ func (a ArgType) Is(val ArgType) bool { return a&val != 0 } -func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, actualMethodName string, errMethodExists bool) (ArgPayload, int) { - origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, actualMethodName, pass) +func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, info *gomegahandler.GomegaBasicInfo) (ArgPayload, int) { + origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, info.MethodName, pass) if !isGomegaExpr { return nil, 0 } var arg ArgPayload - if errMethodExists { + if info.HasErrorMethod { arg = &ErrorMethodPayload{} } else if value.IsExprError(pass, origArgExpr) { arg = newErrPayload(origArgExpr, argExprClone, pass) @@ -97,7 +99,7 @@ func getActualArg(origActualExpr *ast.CallExpr, actualExprClone *ast.CallExpr, a argExprClone = actualExprClone.Args[funcOffset] if gomegainfo.IsAsyncActualMethod(actualMethodName) { - if pass.TypesInfo.TypeOf(origArgExpr).String() == "context.Context" { + if ginkgoinfo.IsGinkgoContext(pass.TypesInfo.TypeOf(origArgExpr)) { funcOffset++ if len(origActualExpr.Args) <= funcOffset { return nil, nil, 0, false diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go index c2aa702b1..6e8e0db6a 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go @@ -27,7 +27,8 @@ type GomegaExpression struct { origAssertionFuncName string actualFuncName string - isAsync bool + isAsync bool + isUsingGomegaVar bool actual *actual.Actual matcher *matcher.Matcher @@ -36,8 +37,8 @@ type GomegaExpression struct { } func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string) (*GomegaExpression, bool) { - actualMethodName, ok := handler.GetActualFuncName(origExpr) - if !ok || !gomegainfo.IsActualMethod(actualMethodName) { + info, ok := handler.GetGomegaBasicInfo(origExpr) + if !ok || !gomegainfo.IsActualMethod(info.MethodName) { return nil, false } @@ -45,16 +46,14 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand if !ok || !gomegainfo.IsAssertionFunc(origSel.Sel.Name) { return &GomegaExpression{ orig: origExpr, - actualFuncName: actualMethodName, + actualFuncName: info.MethodName, }, true } exprClone := astcopy.CallExpr(origExpr) selClone := exprClone.Fun.(*ast.SelectorExpr) - errMethodExists := false - - origActual := handler.GetActualExpr(origSel, &errMethodExists) + origActual := handler.GetActualExpr(origSel) if origActual == nil { return nil, false } @@ -64,7 +63,7 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand return nil, false } - actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, handler, timePkg, errMethodExists) + actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, timePkg, info) if !ok { return nil, false } @@ -89,9 +88,10 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand assertionFuncName: origSel.Sel.Name, origAssertionFuncName: origSel.Sel.Name, 
- actualFuncName: actualMethodName, + actualFuncName: info.MethodName, - isAsync: actl.IsAsync(), + isAsync: actl.IsAsync(), + isUsingGomegaVar: info.UseGomegaVar, actual: actl, matcher: mtchr, @@ -135,6 +135,10 @@ func (e *GomegaExpression) IsAsync() bool { return e.isAsync } +func (e *GomegaExpression) IsUsingGomegaVar() bool { + return e.isUsingGomegaVar +} + func (e *GomegaExpression) ReverseAssertionFuncLogic() { assertionFunc := e.clone.Fun.(*ast.SelectorExpr).Sel newName := reverseassertion.ChangeAssertionLogic(assertionFunc.Name) diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go index 0969b9551..7a983cc9e 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go @@ -41,13 +41,13 @@ func New(origMatcher, matcherClone *ast.CallExpr, pass *analysis.Pass, handler g reverse := false var assertFuncName string for { - ok := false - assertFuncName, ok = handler.GetActualFuncName(origMatcher) + info, ok := handler.GetGomegaBasicInfo(origMatcher) if !ok { return nil, false } - if assertFuncName != "Not" { + if info.MethodName != "Not" { + assertFuncName = info.MethodName break } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go index dda0dd73b..ba74722d2 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go @@ -194,6 +194,10 @@ func IsExprError(pass *analysis.Pass, expr ast.Expr) bool { return interfaces.ImplementsError(actualArgType) case *gotypes.Pointer: + if interfaces.ImplementsError(t) { + return true + } + if tt, ok := t.Elem().(*gotypes.Named); ok { return interfaces.ImplementsError(tt) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go index 4b6de5767..322bbc453 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go @@ -20,11 +20,11 @@ const ( func handleGinkgoSpecs(expr ast.Expr, config types.Config, pass *analysis.Pass, ginkgoHndlr Handler) bool { goDeeper := false if exp, ok := expr.(*ast.CallExpr); ok { - if bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, exp) { + if config.ForbidFocus && checkFocusContainer(pass, ginkgoHndlr, exp) { goDeeper = true } - if bool(config.ForbidSpecPollution) && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { + if config.ForbidSpecPollution && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { goDeeper = true } } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go new file mode 100644 index 000000000..bdc8b2e16 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go @@ -0,0 +1,26 @@ +package ginkgoinfo + +import ( + gotypes "go/types" + "strings" +) + +const ( + ctxTypeName = "context.Context" + ginkgoCtxSuffix = "github.com/onsi/ginkgo/v2/internal.SpecContext" +) + +func IsGinkgoContext(t gotypes.Type) bool { + maybeCtx := gotypes.Unalias(t) + + typeName := maybeCtx.String() + if 
typeName == ctxTypeName { + return true + } + + if strings.HasSuffix(typeName, ginkgoCtxSuffix) { + return true + } + + return false +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go index c853ca906..8ab87c76e 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go @@ -14,24 +14,34 @@ type dotHandler struct { pass *analysis.Pass } -// GetActualFuncName returns the name of the gomega function, e.g. `Expect` -func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - switch actualFunc := expr.Fun.(type) { - case *ast.Ident: - return actualFunc.Name, true - case *ast.SelectorExpr: - if h.isGomegaVar(actualFunc.X) { - return actualFunc.Sel.Name, true - } +// GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info +func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { + info := &GomegaBasicInfo{} + for { + switch actualFunc := expr.Fun.(type) { + case *ast.Ident: + info.MethodName = actualFunc.Name + return info, true + case *ast.SelectorExpr: + if h.isGomegaVar(actualFunc.X) { + info.UseGomegaVar = true + info.MethodName = actualFunc.Sel.Name + return info, true + } - if x, ok := actualFunc.X.(*ast.CallExpr); ok { - return h.GetActualFuncName(x) - } + if actualFunc.Sel.Name == "Error" { + info.HasErrorMethod = true + } - case *ast.CallExpr: - return h.GetActualFuncName(actualFunc) + if x, ok := actualFunc.X.(*ast.CallExpr); ok { + expr = x + } else { + return nil, false + } + default: + return nil, false + } } - return "", false } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -51,7 +61,7 @@ func (dotHandler) GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast } } -func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { +func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -66,11 +76,7 @@ func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExis return actualExpr } } else { - if fun.Sel.Name == "Error" { - *errMethodExists = true - } - - return h.GetActualExpr(fun, errMethodExists) + return h.GetActualExpr(fun) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go index 414438ba6..881ec8789 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go @@ -14,17 +14,23 @@ const ( // in imported with "." name, custom name or without any name. type Handler interface { // GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` - GetActualFuncName(*ast.CallExpr) (string, bool) + GetGomegaBasicInfo(*ast.CallExpr) (*GomegaBasicInfo, bool) // ReplaceFunction replaces the function with another one, for fix suggestions ReplaceFunction(*ast.CallExpr, *ast.Ident) - GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr + GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr GetActualExprClone(origFunc, funcClone *ast.SelectorExpr) *ast.CallExpr GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast.CallExpr } +type GomegaBasicInfo struct { + MethodName string + UseGomegaVar bool + HasErrorMethod bool +} + // GetGomegaHandler returns a gomegar handler according to the way gomega was imported in the specific file func GetGomegaHandler(file *ast.File, pass *analysis.Pass) Handler { for _, imp := range file.Imports { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go index 66d563311..61c471f4c 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go @@ -18,28 +18,39 @@ type nameHandler struct { pass *analysis.Pass } -// GetActualFuncName returns the name of the gomega function, e.g. `Expect` -func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - selector, ok := expr.Fun.(*ast.SelectorExpr) - if !ok { - return "", false - } +// GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info +func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { + info := &GomegaBasicInfo{} + for { + selector, ok := expr.Fun.(*ast.SelectorExpr) + if !ok { + return nil, false + } - switch x := selector.X.(type) { - case *ast.Ident: - if x.Name != g.name { - if !g.isGomegaVar(x) { - return "", false - } + if selector.Sel.Name == "Error" { + info.HasErrorMethod = true } - return selector.Sel.Name, true + switch x := selector.X.(type) { + case *ast.Ident: + if x.Name != g.name { + if !g.isGomegaVar(x) { + return nil, false + } + info.UseGomegaVar = true + } + + info.MethodName = selector.Sel.Name - case *ast.CallExpr: - return g.GetActualFuncName(x) - } + return info, true + + case *ast.CallExpr: + expr = x - return "", false + default: + return nil, false + } + } } // ReplaceFunction replaces the function with another one, for fix suggestions @@ -51,7 +62,7 @@ func (g nameHandler) isGomegaVar(x ast.Expr) bool { return gomegainfo.IsGomegaVar(x, g.pass) } -func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExists *bool) *ast.CallExpr { +func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { actualExpr, ok := assertionFunc.X.(*ast.CallExpr) if !ok { return nil @@ -69,10 +80,7 @@ func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr, errMethodExi return actualExpr } } else { - if fun.Sel.Name == "Error" { - *errMethodExists = true - } - return g.GetActualExpr(fun, errMethodExists) + return g.GetActualExpr(fun) } } return nil diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go index ca45a34b2..93be55ec0 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go @@ -3,7 +3,7 @@ package 
gomegainfo import ( "go/ast" gotypes "go/types" - "regexp" + "strings" "golang.org/x/tools/go/analysis" ) @@ -85,29 +85,33 @@ func IsAssertionFunc(name string) bool { return false } -var gomegaTypeRegex = regexp.MustCompile(`github\.com/onsi/gomega/(?:internal|types)\.Gomega`) - func IsGomegaVar(x ast.Expr, pass *analysis.Pass) bool { - if tx, ok := pass.TypesInfo.Types[x]; ok { - return IsGomegaType(tx.Type) + if _, isIdent := x.(*ast.Ident); !isIdent { + return false } - return false + tx, ok := pass.TypesInfo.Types[x] + if !ok { + return false + } + + return IsGomegaType(tx.Type) } +const ( + gomegaStructType = "github.com/onsi/gomega/internal.Gomega" + gomegaInterface = "github.com/onsi/gomega/types.Gomega" +) + func IsGomegaType(t gotypes.Type) bool { - var typeStr string - switch ttx := t.(type) { + switch ttx := gotypes.Unalias(t).(type) { case *gotypes.Pointer: - tp := ttx.Elem() - typeStr = tp.String() + return IsGomegaType(ttx.Elem()) case *gotypes.Named: - typeStr = ttx.String() - - default: - return false + name := ttx.String() + return strings.HasSuffix(name, gomegaStructType) || strings.HasSuffix(name, gomegaInterface) } - return gomegaTypeRegex.MatchString(typeStr) + return false } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go index e4eda7f6c..307cd2d12 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go @@ -19,11 +19,11 @@ const valueInEventually = "use a function call in %[1]s. This actually checks no type AsyncFuncCallRule struct{} func (r AsyncFuncCallRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - if bool(config.SuppressAsync) || !gexp.IsAsync() { + if config.SuppressAsync || !gexp.IsAsync() { return false } - if asyncArg := gexp.GetAsyncActualArg(); asyncRules != nil { + if asyncArg := gexp.GetAsyncActualArg(); asyncArg != nil { return !asyncArg.IsValid() } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go index 45953ec01..ca5c32619 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go @@ -20,7 +20,7 @@ const ( type AsyncTimeIntervalsRule struct{} func (r AsyncTimeIntervalsRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return !bool(config.SuppressAsync) && bool(config.ValidateAsyncIntervals) && gexp.IsAsync() + return !config.SuppressAsync && config.ValidateAsyncIntervals && gexp.IsAsync() } func (r AsyncTimeIntervalsRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go index 81d703bb8..4b6eafdda 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go @@ -14,7 +14,7 @@ const compareDifferentTypes = "use %[1]s with different types: Comparing %[2]s w type EqualDifferentTypesRule struct{} func (r EqualDifferentTypesRule) isApplied(config types.Config) bool { - return 
!bool(config.SuppressTypeCompare) + return !config.SuppressTypeCompare } func (r EqualDifferentTypesRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go index 5b28e7d9b..f27dfb0d8 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go @@ -12,7 +12,7 @@ import ( type EqualNilRule struct{} func (r EqualNilRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return !bool(config.SuppressNil) && + return !config.SuppressNil && gexp.MatcherTypeIs(matcher.EqualValueMatcherType) } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go index 20bcb7211..159fb615a 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go @@ -10,7 +10,7 @@ import ( type HaveLen0 struct{} func (r *HaveLen0) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !bool(config.AllowHaveLen0) + return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !config.AllowHaveLen0 } func (r *HaveLen0) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go index 437d3ee23..317e22ed3 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go @@ -24,7 +24,7 @@ func (r HaveOccurredRule) Apply(gexp *expression.GomegaExpression, config types. 
return true } - if bool(config.ForceSucceedForFuncs) && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { + if config.ForceSucceedForFuncs && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { gexp.ReverseAssertionFuncLogic() gexp.SetMatcherSucceed() reportBuilder.AddIssue(true, "prefer using the Succeed matcher for error function, instead of HaveOccurred") diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go index fc3cd49e5..6677dce3b 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go @@ -42,9 +42,9 @@ func (r NilCompareRule) isApplied(gexp *expression.GomegaExpression, config type return false, false } - isErr := actl.IsError() && !bool(config.SuppressErr) + isErr := actl.IsError() && !config.SuppressErr - if !isErr && bool(config.SuppressNil) { + if !isErr && config.SuppressNil { return isErr, false } diff --git a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go index 6a5167a8a..45a8d948b 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go @@ -28,7 +28,7 @@ func (r SucceedRule) Apply(gexp *expression.GomegaExpression, config types.Confi return true } - if bool(config.ForceSucceedForFuncs) && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { + if config.ForceSucceedForFuncs && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { gexp.ReverseAssertionFuncLogic() gexp.SetMatcherHaveOccurred() diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go deleted file mode 100644 index be510c4e9..000000000 --- a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go +++ /dev/null @@ -1,32 +0,0 @@ -package types - -import ( - "errors" - "strings" -) - -// Boolean is a bool, implementing the flag.Value interface, to be used as a flag var. 
-type Boolean bool - -func (b *Boolean) Set(value string) error { - if b == nil { - return errors.New("trying to set nil parameter") - } - switch strings.ToLower(value) { - case "true": - *b = true - case "false": - *b = false - default: - return errors.New(value + " is not a Boolean value") - - } - return nil -} - -func (b Boolean) String() string { - if b { - return "true" - } - return "false" -} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go index 0aadd3416..81a9ebe32 100644 --- a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -17,22 +17,22 @@ const ( ) type Config struct { - SuppressLen Boolean - SuppressNil Boolean - SuppressErr Boolean - SuppressCompare Boolean - SuppressAsync Boolean - ForbidFocus Boolean - SuppressTypeCompare Boolean - AllowHaveLen0 Boolean - ForceExpectTo Boolean - ValidateAsyncIntervals Boolean - ForbidSpecPollution Boolean - ForceSucceedForFuncs Boolean + SuppressLen bool + SuppressNil bool + SuppressErr bool + SuppressCompare bool + SuppressAsync bool + ForbidFocus bool + SuppressTypeCompare bool + AllowHaveLen0 bool + ForceExpectTo bool + ValidateAsyncIntervals bool + ForbidSpecPollution bool + ForceSucceedForFuncs bool } func (s *Config) AllTrue() bool { - return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) + return s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus } func (s *Config) Clone() Config { diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml index 461415cbc..e307e5af6 100644 --- a/vendor/github.com/openshift/api/.ci-operator.yaml +++ b/vendor/github.com/openshift/api/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.24-openshift-4.20 + tag: rhel-9-release-golang-1.24-openshift-4.21 diff --git a/vendor/github.com/openshift/api/.golangci.go-validated.yaml b/vendor/github.com/openshift/api/.golangci.go-validated.yaml new file mode 100644 index 000000000..44c73149d --- /dev/null +++ b/vendor/github.com/openshift/api/.golangci.go-validated.yaml @@ -0,0 +1,58 @@ +version: "2" +linters: + default: none + enable: + - kubeapilinter + settings: + custom: + kubeapilinter: + path: tools/_output/bin/kube-api-linter.so + description: kubeapilinter is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enable: + - optionalfields + disable: + - "*" + lintersConfig: + optionalfields: + pointers: + preference: Always + policy: SuggestFix + omitEmpty: + # This will force omitempty on optional fields. + # This is in line with upstream guidance where optional fields should be omitted + # from the serialized output unless they are non-zero. + policy: SuggestFix + omitzero: + # This will force omitzero on optional struct fields. + # This means they can be omitted correctly and prevents the need for pointers to structs. + policy: SuggestFix + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ + rules: + - linters: + - kubeapilinter + # This regex must always be updated in tandem with the regex in .golangci.yaml that prevents `optionalfields` from being applied to the files in the path-except. 
+ path-except: machine/v1beta1/(types_awsprovider.go|types_azureprovider.go|types_gcpprovider.go|types_vsphereprovider.go)|machine/v1alpha1/types_openstack.go +issues: + # We have a lot of existing issues. + # Want to make sure that those adding new fields have an + # opportunity to fix them when running the linter locally. + max-issues-per-linter: 1000 +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index c5278d9c8..649f9bed3 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -25,10 +25,28 @@ linters: preference: WhenRequired policy: SuggestFix omitEmpty: - # Ignore missing omitempty so that we can omit the omitempty for discoverability. - # Discoverability is for configuration APIs, generally singletons. - # Refer to the API conventions for when to use discoverability (not our default stance). - policy: Ignore + # This will force omitempty on optional fields. + # This is in line with upstream guidance where optional fields should be omitted + # from the serialized output unless they are non-zero. + policy: SuggestFix + omitzero: + # This will force omitzero on optional struct fields. + # This means they can be omitted correctly and prevents the need for pointers to structs. + policy: SuggestFix + requiredFields: + pointers: + # This will force pointers when the field is required, but only when the zero + # value is a valid user choice, and has a semantic difference to being omitted (e.g. replicas allows 0). + policy: SuggestFix + omitempty: + # This will force omitempty on required fields. + # We do this so that the behaviour of not setting a value for the field is the same between + # both structured and unstructured clients. + policy: SuggestFix + omitzero: + # This will force omitzero on required struct fields. + # This means they can be omitted correctly and prevents the need for pointers to structs. + policy: SuggestFix uniqueMarkers: customMarkers: - identifier: "openshift:validation:FeatureGateAwareEnum" @@ -55,6 +73,12 @@ linters: - third_party$ - builtin$ - examples$ + rules: + - linters: + - kubeapilinter + # This regex must always be updated in tandem with the regex in .golangci.go-validated.yaml that prevents `optionalfields` from being applied to the files in the path. + path: machine/v1beta1/(types_awsprovider.go|types_azureprovider.go|types_gcpprovider.go|types_vsphereprovider.go)|machine/v1alpha1/types_openstack.go + text: "optionalfields" issues: # We have a lot of existing issues. # Want to make sure that those adding new fields have an diff --git a/vendor/github.com/openshift/api/AGENTS.md b/vendor/github.com/openshift/api/AGENTS.md new file mode 100644 index 000000000..a009bbb2d --- /dev/null +++ b/vendor/github.com/openshift/api/AGENTS.md @@ -0,0 +1,185 @@ +This file provides guidance to AI agents when working with code in this repository. + +This is the OpenShift API repository - the canonical location of OpenShift API type definitions and serialization code. 
It contains: + +- API type definitions for OpenShift-specific resources (Custom Resource Definitions) +- FeatureGate management system for controlling API availability across cluster profiles +- Generated CRD manifests and validation schemas +- Integration test suite for API validation + +## Key Architecture Components + +### FeatureGate System +The FeatureGate system (`features/features.go`) controls API availability across different cluster profiles (Hypershift, SelfManaged) and feature sets (Default, TechPreview, DevPreview). Each API feature is gated behind a FeatureGate that can be enabled/disabled per cluster profile and feature set. + +### API Structure +APIs are organized by group and version (e.g., `route/v1`, `config/v1`). Each API group contains: +- `types.go` - Go type definitions +- `zz_generated.*` files - Generated code (deepcopy, CRDs, etc.) +- `tests/` directories - Integration test definitions +- CRD manifest files + +## Common Development Commands + +### Building +```bash +make build # Build render and write-available-featuresets binaries +make clean # Clean build artifacts +``` + +### Code Generation +```bash +make update # Alias for update-codegen-crds +``` + +### Testing +```bash +make test-unit # Run unit tests +make integration # Run integration tests (in tests/ directory) +go test -v ./... # Run tests for specific packages + +# Run integration tests for specific API groups +make -C config/v1 test # Run tests for config/v1 API group +make -C route/v1 test # Run tests for route/v1 API group +make -C operator/v1 test # Run tests for operator/v1 API group +``` + +### Validation and Verification +```bash +make verify # Run all verification checks +make verify-scripts # Verify generated code is up to date +make verify-codegen-crds # Verify CRD generation is current +make lint # Run golangci-lint (only on changes from master) +make lint-fix # Auto-fix linting issues where possible +``` + +## Adding New APIs + +All APIs should start as tech preview. +New fields on stable APIs should be introduced behind a feature gate `+openshift:enable:FeatureGate=MyFeatureGate`. + + +### For New Stable APIs (v1) +1. Create the API type with proper kubebuilder annotations +2. Include required markers like `+openshift:compatibility-gen:level=1` +3. Add validation tests in `//tests//` +4. Run `make update-codegen-crds` to generate CRDs + +### For New TechPreview APIs (v1alpha1) +1. First add a FeatureGate in `features/features.go` +2. Create the API type with `+openshift:enable:FeatureGate=MyFeatureGate` +3. Add corresponding test files +4. Run generation commands + +### Adding FeatureGates +Add to `features/features.go` using the builder pattern: +```go +FeatureGateMyFeatureName = newFeatureGate("MyFeatureName"). + reportProblemsToJiraComponent("my-jira-component"). + contactPerson("my-team-lead"). + productScope(ocpSpecific). + enableIn(configv1.TechPreviewNoUpgrade). + mustRegister() +``` + +## Testing Framework + +The repository includes a comprehensive integration test suite in `tests/`. Test suites are defined in `*.testsuite.yaml` files alongside API definitions and support: +- `onCreate` tests for validation during resource creation +- `onUpdate` tests for update-specific validations and immutability +- Status subresource testing +- Validation ratcheting tests using `initialCRDPatches` + +Use `tests/hack/gen-minimal-test.sh $FOLDER $VERSION` to generate test suite templates. 
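For illustration, a feature-gated optional field following the guidance above might look roughly like the sketch below; the field name, feature gate, and numeric bounds are hypothetical, with the validation markers documented in the field comment as required:

```go
// exampleTimeoutSeconds is an optional field that sets how long, in seconds,
// the operator waits for the example backend to become ready.
// Valid values are integers between 1 and 300.
// When omitted, a platform-chosen default (currently 30) is used and is
// subject to change over time.
// +openshift:enable:FeatureGate=ExampleFeature
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=300
// +optional
ExampleTimeoutSeconds int32 `json:"exampleTimeoutSeconds,omitempty"`
```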
+ +## Container-based Development +```bash +make verify-with-container # Run verification in container +make generate-with-container # Run code generation in container +``` + +Uses `podman` by default, set `RUNTIME=docker` or `USE_DOCKER=1` to use Docker instead. + +## Custom Claude Code Commands + +### API Review +``` +/api-review +``` +Runs comprehensive API review for OpenShift API changes in a GitHub PR: +- Executes `make lint` to check for kube-api-linter issues +- Validates that all API fields are properly documented +- Ensures optional fields explain behavior when not present +- Confirms validation rules and kubebuilder markers are documented in field comments + +#### Documentation Requirements +All kubebuilder validation markers must be documented in the field's comment. For example: + +**Good:** +```go +// internalDNSRecords is an optional field that determines whether we deploy +// with internal records enabled for api, api-int, and ingress. +// Valid values are "Enabled" and "Disabled". +// When set to Enabled, in cluster DNS resolution will be enabled for the api, api-int, and ingress endpoints. +// When set to Disabled, in cluster DNS resolution will be disabled and an external DNS solution must be provided for these endpoints. +// +optional +// +kubebuilder:validation:Enum=Enabled;Disabled +InternalDNSRecords InternalDNSRecordsType `json:"internalDNSRecords"` +``` + +**Bad:** +```go +// internalDNSRecords determines whether we deploy with internal records enabled for +// api, api-int, and ingress. +// +optional // ❌ Optional nature not documented in comment +// +kubebuilder:validation:Enum=Enabled;Disabled // ❌ Valid values not documented +InternalDNSRecords InternalDNSRecordsType `json:"internalDNSRecords"` +``` + +#### Systematic Validation Marker Documentation Checklist + +**MANDATORY**: For each field with validation markers, verify the comment documents ALL of the following that apply: + +**Field Optionality:** +- [ ] `+optional` - explain behavior when field is omitted +- [ ] `+required` - explain that the field is required + +**String/Array Length Constraints:** +- [ ] `+kubebuilder:validation:MinLength` and `+kubebuilder:validation:MaxLength` - document character length constraints +- [ ] `+kubebuilder:validation:MinItems` and `+kubebuilder:validation:MaxItems` - document item count ranges + +**Value Constraints:** +- [ ] `+kubebuilder:validation:Enum` - list all valid enum values and their meanings +- [ ] `+kubebuilder:validation:Pattern` - explain the pattern requirement in human-readable terms +- [ ] `+kubebuilder:validation:Minimum` and `+kubebuilder:validation:Maximum` - document numeric ranges + +**Advanced Validation:** +- [ ] `+kubebuilder:validation:XValidation` - explain cross-field validation rules in detail +- [ ] Any custom validation logic - document the validation behavior + +#### API Review Process + +**CRITICAL PROCESS**: Follow this exact order to ensure comprehensive validation: + +1. **Linting Check**: Run `make lint` and fix all kubeapilinter errors first +2. **Extract Validation Markers**: Use systematic search to find all markers +3. **Systematic Documentation Review**: For each marker found, verify corresponding documentation exists +4. **Optional Fields Review**: Ensure every `+optional` field explains omitted behavior +5. 
**Cross-field Validation**: Verify any documented field relationships have corresponding `XValidation` rules + +**FAILURE CONDITIONS**: The review MUST fail if any of these are found: +- Any validation marker without corresponding documentation +- Any `+optional` field without omitted behavior explanation +- Any documented field constraint without enforcement via validation rules +- Any `make lint` failures + +The comment must explicitly state: +- When a field is optional (for `+kubebuilder:validation:Optional` or `+optional`) +- Valid enum values (for `+kubebuilder:validation:Enum`) +- Validation constraints (for min/max, patterns, etc.) +- Default behavior when field is omitted +- Any interactions with other fields, commonly implemented with `+kubebuilder:validation:XValidation` + +**CRITICAL**: When API documentation states field relationships or constraints (e.g., "cannot be used together with field X", "mutually exclusive with field Y"), these relationships MUST be enforced with appropriate validation rules. Use `+kubebuilder:validation:XValidation` with CEL expressions for cross-field constraints. Documentation without enforcement is insufficient and will fail review. + +Example: `/api-review https://github.com/openshift/api/pull/1234` diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp index 0a4c98c48..45d24f4fc 100644 --- a/vendor/github.com/openshift/api/Dockerfile.ocp +++ b/vendor/github.com/openshift/api/Dockerfile.ocp @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.20 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.21 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.20:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.21:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 123efe102..fd4268a78 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -73,7 +73,7 @@ verify-scripts: hack/verify-payload-featuregates.sh .PHONY: verify -verify: verify-scripts lint verify-crd-schema verify-codegen-crds +verify: verify-scripts lint verify-crd-schema verify-crdify verify-codegen-crds .PHONY: verify-codegen-crds verify-codegen-crds: @@ -83,6 +83,10 @@ verify-codegen-crds: verify-crd-schema: bash -x hack/verify-crd-schema-checker.sh +.PHONY: verify-crdify +verify-crdify: + bash -x hack/verify-crdify.sh + .PHONY: verify-feature-promotion verify-feature-promotion: hack/verify-promoted-features-pass-tests.sh diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS index ff904b63a..ebd9a2f45 100644 --- a/vendor/github.com/openshift/api/OWNERS +++ b/vendor/github.com/openshift/api/OWNERS @@ -1,7 +1,7 @@ reviewers: - - deads2k - JoelSpeed - everettraven approvers: - deads2k - JoelSpeed + - everettraven diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index eac29a236..222c7f0cc 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -76,6 +76,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImagePolicyList{}, 
&ClusterImagePolicy{}, &ClusterImagePolicyList{}, + &InsightsDataGather{}, + &InsightsDataGatherList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index e1a98cb26..0afe7b1d8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -58,9 +58,8 @@ type APIServerSpec struct { Encryption APIServerEncryption `json:"encryption"` // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. // - // If unset, a default (which may change between releases) is chosen. Note that only Old, - // Intermediate and Custom profiles are currently supported, and the maximum available - // minTLSVersion is VersionTLS12. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is the Intermediate profile. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index 004e94723..52a41b2fe 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -91,6 +91,7 @@ type AuthenticationSpec struct { // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -253,9 +254,16 @@ type TokenIssuer struct { // The Kubernetes API server determines how authentication tokens should be handled // by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. // - // issuerURL must use the 'https' scheme. + // Must be at least 1 character and must not exceed 512 characters in length. + // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. // - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 // +required URL string `json:"issuerURL"` @@ -320,10 +328,10 @@ type TokenClaimMappings struct { // used to construct the extra attribute for the cluster identity. // When omitted, no extra attributes will be present on the cluster identity. // key values for extra mappings must be unique. - // A maximum of 64 extra attribute mappings may be provided. + // A maximum of 32 extra attribute mappings may be provided. 
// // +optional - // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:MaxItems=32 // +listType=map // +listMapKey=key // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings @@ -375,10 +383,10 @@ type TokenClaimOrExpressionMapping struct { // Precisely one of claim or expression must be set. // expression must not be specified when claim is set. // When specified, expression must be at least 1 character in length - // and must not exceed 4096 characters in length. + // and must not exceed 1024 characters in length. // // +optional - // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:MaxLength=1024 // +kubebuilder:validation:MinLength=1 Expression string `json:"expression,omitempty"` } @@ -437,12 +445,12 @@ type ExtraMapping struct { // For example, the 'sub' claim value can be accessed as 'claims.sub'. // Nested claims can be accessed using dot notation ('claims.foo.bar'). // - // valueExpression must not exceed 4096 characters in length. + // valueExpression must not exceed 1024 characters in length. // valueExpression must not be empty. // // +required // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:MaxLength=1024 ValueExpression string `json:"valueExpression"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index a447adb9f..832304038 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -9,10 +9,9 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterOperator is the Custom Resource object which holds the current state -// of an operator. This object is used by operators to convey their state to -// the rest of the cluster. -// +// ClusterOperator holds the status of a core or optional OpenShift component +// managed by the Cluster Version Operator (CVO). This object is used by +// operators to convey their state to the rest of the cluster. // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497 @@ -154,15 +153,21 @@ const ( // is functional and available in the cluster. Available=False means at least // part of the component is non-functional, and that the condition requires // immediate administrator intervention. + // A component must not report Available=False during the course of a normal upgrade. OperatorAvailable ClusterStatusConditionType = "Available" // Progressing indicates that the component (operator and all configured operands) - // is actively rolling out new code, propagating config changes, or otherwise + // is actively rolling out new code, propagating config changes (e.g, a version change), or otherwise // moving from one steady state to another. Operators should not report - // progressing when they are reconciling (without action) a previously known - // state. If the observed cluster state has changed and the component is - // reacting to it (scaling up for instance), Progressing should become true + // Progressing when they are reconciling (without action) a previously known + // state. 
Operators should not report Progressing only because DaemonSets owned by them + // are adjusting to a new node from cluster scaleup or a node rebooting from cluster upgrade. + // If the observed cluster state has changed and the component is + // reacting to it (updated proxy configuration for instance), Progressing should become true // since it is moving from one steady state to another. + // A component in a cluster with less than 250 nodes must complete a version + // change within a limited period of time: 90 minutes for Machine Config Operator and 20 minutes for others. + // Machine Config Operator is given more time as it needs to restart control plane nodes. OperatorProgressing ClusterStatusConditionType = "Progressing" // Degraded indicates that the component (operator and all configured operands) @@ -175,7 +180,7 @@ const ( // Degraded because it may have a lower quality of service. A component may be // Progressing but not Degraded because the transition from one state to // another does not persist over a long enough period to report Degraded. A - // component should not report Degraded during the course of a normal upgrade. + // component must not report Degraded during the course of a normal upgrade. // A component may report Degraded in response to a persistent infrastructure // failure that requires eventual administrator intervention. For example, if // a control plane host is unhealthy and must be replaced. A component should diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 54e1de94c..cfac9689e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -257,7 +257,7 @@ type UpdateHistory struct { // acceptedRisks records risks which were accepted to initiate the update. // For example, it may menition an Upgradeable=False or missing signature - // that was overriden via desiredUpdate.force, or an update that was + // that was overridden via desiredUpdate.force, or an update that was // initiated despite not being in the availableUpdates set of recommended // update targets. // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 1fc06418c..effafde64 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -532,7 +532,7 @@ type AWSPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=AWSClusterHostedDNS + // +openshift:enable:FeatureGate=AWSClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -594,6 +594,19 @@ type AzurePlatformStatus struct { // +listType=atomic // +optional ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"` + + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. 
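A minimal sketch, assuming the vendored config/v1 package exposes ClusterOperatorStatusCondition with Type, Status, LastTransitionTime, Reason, and Message fields, of how an operator might report the Progressing condition described above during a version change; the Reason and Message strings are illustrative only.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Progressing=True while rolling out a new version; per the guidance
	// above, an operator should not set this merely because its DaemonSets
	// are adjusting to new or rebooting nodes.
	cond := configv1.ClusterOperatorStatusCondition{
		Type:               configv1.OperatorProgressing,
		Status:             configv1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Reason:             "VersionRollout",                  // illustrative reason
		Message:            "moving to a new release payload", // illustrative message
	}
	fmt.Printf("%s=%s: %s\n", cond.Type, cond.Status, cond.Message)
}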
+ // + // +default={"dnsType": "PlatformDefault"} + // +kubebuilder:default={"dnsType": "PlatformDefault"} + // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall + // +optional + CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. @@ -637,7 +650,7 @@ const ( ) // GCPServiceEndpointName is the name of the GCP Service Endpoint. -// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;ServiceUsage;Storage +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;IAMCredentials;OAuth;ServiceUsage;Storage;STS type GCPServiceEndpointName string const ( @@ -659,11 +672,20 @@ const ( // GCPServiceEndpointNameIAM is the name used for the GCP IAM Service endpoint. GCPServiceEndpointNameIAM GCPServiceEndpointName = "IAM" + // GCPServiceEndpointNameIAMCredentials is the name used for the GCP IAM Credentials Service endpoint. + GCPServiceEndpointNameIAMCredentials GCPServiceEndpointName = "IAMCredentials" + + // GCPServiceEndpointNameOAuth is the name used for the GCP OAuth2 Service endpoint. + GCPServiceEndpointNameOAuth GCPServiceEndpointName = "OAuth" + // GCPServiceEndpointNameServiceUsage is the name used for the GCP Service Usage Service endpoint. GCPServiceEndpointNameServiceUsage GCPServiceEndpointName = "ServiceUsage" // GCPServiceEndpointNameStorage is the name used for the GCP Storage Service endpoint. GCPServiceEndpointNameStorage GCPServiceEndpointName = "Storage" + + // GCPServiceEndpointNameSTS is the name used for the GCP STS Service endpoint. + GCPServiceEndpointNameSTS GCPServiceEndpointName = "STS" ) // GCPServiceEndpoint store the configuration of a custom url to @@ -745,7 +767,7 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNS + // +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -754,13 +776,13 @@ type GCPPlatformStatus struct { // used when creating clients to interact with GCP services. // When not specified, the default endpoint for the GCP region will be used. // Only 1 endpoint override is permitted for each GCP service. - // The maximum number of endpoint overrides allowed is 9. + // The maximum number of endpoint overrides allowed is 11. // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:MaxItems=11 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" // +optional - // +openshift:enable:FeatureGate=GCPCustomAPIEndpoints + // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } @@ -1715,7 +1737,7 @@ type IBMCloudPlatformSpec struct { // serviceEndpoints is a list of custom endpoints which will override the default // service endpoints of an IBM service. These endpoints are used by components // within the cluster when trying to reach the IBM Cloud Services that have been - // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each // endpoint is resolvable. 
Once validated, the cloud config and IBMCloudPlatformStatus // are updated to reflect the same custom endpoints. // A maximum of 13 service endpoints overrides are supported. @@ -1749,7 +1771,7 @@ type IBMCloudPlatformStatus struct { // serviceEndpoints is a list of custom endpoints which will override the default // service endpoints of an IBM service. These endpoints are used by components // within the cluster when trying to reach the IBM Cloud Services that have been - // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus // are updated to reflect the same custom endpoints. // +openshift:validation:FeatureGateAwareMaxItems:featureGate=DyanmicServiceEndpointIBMCloud,maxItems=13 diff --git a/vendor/github.com/openshift/api/config/v1/types_insights.go b/vendor/github.com/openshift/api/config/v1/types_insights.go new file mode 100644 index 000000000..b0959881f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_insights.go @@ -0,0 +1,230 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// InsightsDataGather provides data gather configuration options for the Insights Operator. +// +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2448 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InsightsDataGather struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +required + Spec InsightsDataGatherSpec `json:"spec,omitempty,omitzero"` +} + +// InsightsDataGatherSpec contains the configuration for the data gathering. +type InsightsDataGatherSpec struct { + // gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress. + // +required + GatherConfig GatherConfig `json:"gatherConfig,omitempty,omitzero"` +} + +// GatherConfig provides data gathering configuration options. +type GatherConfig struct { + // dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. + // It may not exceed 2 items and must not contain duplicates. + // Valid values are ObfuscateNetworking and WorkloadNames. + // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + // When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. + // When omitted no obfuscation is applied. 
+ // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="dataPolicy items must be unique" + // +listType=atomic + // +optional + DataPolicy []DataPolicyOption `json:"dataPolicy,omitempty"` + // gatherers is a required field that specifies the configuration of the gatherers. + // +required + Gatherers Gatherers `json:"gatherers,omitempty,omitzero"` + // storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + // If omitted, the gathering job will use ephemeral storage. + // +optional + Storage Storage `json:"storage,omitempty,omitzero"` +} + +// Gatherers specifies the configuration of the gatherers +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Custom' ? has(self.custom) : !has(self.custom)",message="custom is required when mode is Custom, and forbidden otherwise" +// +union +type Gatherers struct { + // mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + // When set to All, all gatherers will run and gather data. + // When set to None, all gatherers will be disabled and no data will be gathered. + // When set to Custom, the custom configuration from the custom field will be applied. + // +unionDiscriminator + // +required + Mode GatheringMode `json:"mode,omitempty"` + // custom provides gathering configuration. + // It is required when mode is Custom, and forbidden otherwise. + // Custom configuration allows user to disable only a subset of gatherers. + // Gatherers that are not explicitly disabled in custom configuration will run. + // +unionMember + // +optional + Custom Custom `json:"custom,omitempty,omitzero"` +} + +// Custom provides the custom configuration of gatherers +type Custom struct { + // configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + // It may not exceed 100 items and each gatherer can be present only once. + // It is possible to disable an entire set of gatherers while allowing a specific function within that set. + // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + // +listType=map + // +listMapKey=name + // +required + Configs []GathererConfig `json:"configs,omitempty"` +} + +// GatheringMode defines the valid gathering modes. +// +kubebuilder:validation:Enum=All;None;Custom +type GatheringMode string + +const ( + // Enabled enables all gatherers + GatheringModeAll GatheringMode = "All" + // Disabled disables all gatherers + GatheringModeNone GatheringMode = "None" + // Custom applies the configuration from GatheringConfig. 
+ GatheringModeCustom GatheringMode = "Custom" +) + +// DataPolicyOption declares valid data policy options +// +kubebuilder:validation:Enum=ObfuscateNetworking;WorkloadNames +type DataPolicyOption string + +const ( + // IP addresses and cluster domain name are obfuscated + DataPolicyOptionObfuscateNetworking DataPolicyOption = "ObfuscateNetworking" + // Data from Deployment Validation Operator are obfuscated + DataPolicyOptionObfuscateWorkloadNames DataPolicyOption = "WorkloadNames" +) + +// Storage provides persistent storage configuration options for gathering jobs. +// If the type is set to PersistentVolume, then the PersistentVolume must be defined. +// If the type is set to Ephemeral, then the PersistentVolume must not be defined. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'PersistentVolume' ? has(self.persistentVolume) : !has(self.persistentVolume)",message="persistentVolume is required when type is PersistentVolume, and forbidden otherwise" +// +union +type Storage struct { + // type is a required field that specifies the type of storage that will be used to store the Insights data archive. + // Valid values are "PersistentVolume" and "Ephemeral". + // When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + // When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + // +unionDiscriminator + // +required + Type StorageType `json:"type,omitempty"` + // persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + // The PersistentVolume must be created in the openshift-insights namespace. + // +unionMember + // +optional + PersistentVolume PersistentVolumeConfig `json:"persistentVolume,omitempty,omitzero"` +} + +// StorageType declares valid storage types +// +kubebuilder:validation:Enum=PersistentVolume;Ephemeral +type StorageType string + +const ( + // StorageTypePersistentVolume storage type + StorageTypePersistentVolume StorageType = "PersistentVolume" + // StorageTypeEphemeral storage type + StorageTypeEphemeral StorageType = "Ephemeral" +) + +// PersistentVolumeConfig provides configuration options for PersistentVolume storage. +type PersistentVolumeConfig struct { + // claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + // The PersistentVolumeClaim must be created in the openshift-insights namespace. + // +required + Claim PersistentVolumeClaimReference `json:"claim,omitempty,omitzero"` + // mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default mount path is /var/lib/insights-operator + // The path may not exceed 1024 characters and must not contain a colon. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:XValidation:rule="!self.contains(':')",message="mountPath must not contain a colon" + // +optional + MountPath string `json:"mountPath,omitempty"` +} + +// PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim. 
+type PersistentVolumeClaimReference struct { + // name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. + // It is a string that follows the DNS1123 subdomain format. + // It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name,omitempty"` +} + +// GathererConfig allows to configure specific gatherers +type GathererConfig struct { + // name is the required name of a specific gatherer. + // It may not exceed 256 characters. + // The format for a gatherer name is: {gatherer}/{function} where the function is optional. + // Gatherer consists of a lowercase letters only that may include underscores (_). + // Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + // The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:XValidation:rule=`self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$")`,message=`gatherer name must be in the format of {gatherer}/{function} where the gatherer and function are lowercase letters only that may include underscores (_) and are separated by a forward slash (/) if the function is provided` + // +required + Name string `json:"name,omitempty"` + // state is a required field that allows you to configure specific gatherer. Valid values are "Enabled" and "Disabled". + // When set to Enabled the gatherer will run. + // When set to Disabled the gatherer will not run. + // +required + State GathererState `json:"state,omitempty"` +} + +// GathererState declares valid gatherer state types. +// +kubebuilder:validation:Enum=Enabled;Disabled +type GathererState string + +const ( + // GathererStateEnabled gatherer state, which means that the gatherer will run. + GathererStateEnabled GathererState = "Enabled" + // GathererStateDisabled gatherer state, which means that the gatherer will not run. + GathererStateDisabled GathererState = "Disabled" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InsightsDataGatherList is a collection of items +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type InsightsDataGatherList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the required standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata,omitempty"` + // items is the required list of InsightsDataGather objects + // it may not exceed 100 items + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=100 + // +required + Items []InsightsDataGather `json:"items,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusteroperators.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusteroperators.crd.yaml index 7ab62874a..7bb5defcb 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusteroperators.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusteroperators.crd.yaml @@ -42,10 +42,9 @@ spec: schema: openAPIV3Schema: description: |- - ClusterOperator is the Custom Resource object which holds the current state - of an operator. This object is used by operators to convey their state to - the rest of the cluster. - + ClusterOperator holds the status of a core or optional OpenShift component + managed by the Cluster Version Operator (CVO). This object is used by + operators to convey their state to the rest of the cluster. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). properties: apiVersion: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml index 087b62dda..fe8e41c08 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml @@ -748,7 +748,7 @@ spec: description: |- acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature - that was overriden via desiredUpdate.force, or an update that was + that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml index f93da1e2e..1b2662e08 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml @@ -664,7 +664,7 @@ spec: description: |- acceptedRisks records risks which were accepted to initiate the update. 
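A minimal sketch that exercises the InsightsDataGather types added in types_insights.go above: Custom gathering with one gatherer disabled, network obfuscation enabled, and the archive stored on a PersistentVolumeClaim. The gatherer name and PVC name are hypothetical.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	idg := configv1.InsightsDataGather{
		Spec: configv1.InsightsDataGatherSpec{
			GatherConfig: configv1.GatherConfig{
				// Obfuscate IP addresses and the cluster domain name.
				DataPolicy: []configv1.DataPolicyOption{
					configv1.DataPolicyOptionObfuscateNetworking,
				},
				// Custom mode: every gatherer runs except those disabled below.
				Gatherers: configv1.Gatherers{
					Mode: configv1.GatheringModeCustom,
					Custom: configv1.Custom{
						Configs: []configv1.GathererConfig{
							// "workloads" is a hypothetical gatherer name in the
							// {gatherer}/{function} format described above.
							{Name: "workloads", State: configv1.GathererStateDisabled},
						},
					},
				},
				// Keep the archive on a PVC (hypothetical name) in openshift-insights.
				Storage: configv1.Storage{
					Type: configv1.StorageTypePersistentVolume,
					PersistentVolume: configv1.PersistentVolumeConfig{
						Claim: configv1.PersistentVolumeClaimReference{Name: "insights-archive"},
					},
				},
			},
		},
	}
	fmt.Println(idg.Spec.GatherConfig.Gatherers.Mode)
}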
For example, it may menition an Upgradeable=False or missing signature - that was overriden via desiredUpdate.force, or an update that was + that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml index 300d94a71..3d0a05471 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml @@ -748,7 +748,7 @@ spec: description: |- acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature - that was overriden via desiredUpdate.force, or an update that was + that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml index 6fc2cb0d9..1e0f08de8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml @@ -748,7 +748,7 @@ spec: description: |- acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature - that was overriden via desiredUpdate.force, or an update that was + that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml index b10b46c6f..f4416bf9b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml @@ -296,9 +296,8 @@ spec: description: |- tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. - If unset, a default (which may change between releases) is chosen. Note that only Old, - Intermediate and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
+ The current default is the Intermediate profile. properties: custom: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml index 44dc2924a..37662cb58 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-Default.crd.yaml @@ -227,9 +227,8 @@ spec: description: |- tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. - If unset, a default (which may change between releases) is chosen. Note that only Old, - Intermediate and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default is the Intermediate profile. properties: custom: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml index 843984380..bfeefa11f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml @@ -296,9 +296,8 @@ spec: description: |- tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. - If unset, a default (which may change between releases) is chosen. Note that only Old, - Intermediate and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default is the Intermediate profile. properties: custom: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml index 808e11aac..a49976e0d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml @@ -296,9 +296,8 @@ spec: description: |- tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. - If unset, a default (which may change between releases) is chosen. Note that only Old, - Intermediate and Custom profiles are currently supported, and the maximum available - minTLSVersion is VersionTLS12. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default is the Intermediate profile. 
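A minimal sketch of opting out of the default behaviour described above by pinning the Intermediate profile explicitly, assuming the vendored config/v1 package exposes the TLSSecurityProfile union with Type and Intermediate members.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Leaving tlsSecurityProfile unset defers to the platform default
	// (currently the Intermediate profile, per the description above);
	// setting it pins the profile explicitly.
	spec := configv1.APIServerSpec{
		TLSSecurityProfile: &configv1.TLSSecurityProfile{
			Type:         configv1.TLSProfileIntermediateType,
			Intermediate: &configv1.IntermediateTLSProfile{},
		},
	}
	fmt.Println(spec.TLSSecurityProfile.Type)
}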
properties: custom: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-CustomNoUpgrade.crd.yaml deleted file mode 100644 index a42368f2b..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,857 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. 
- items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - extra: - description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. - When omitted, no extra attributes will be present on the cluster identity. - key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. - items: - description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. - properties: - key: - description: |- - key is a required field that specifies the string - to use as the extra attribute key. - - key must be a domain-prefix path (e.g 'example.org/foo'). - key must not exceed 510 characters in length. - key must contain the '/' character, separating the domain and path characters. - key must not be empty. - - The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. - It must not exceed 253 characters in length. - It must start and end with an alphanumeric character. - It must only contain lower case alphanumeric characters and '-' or '.'. - It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. - It must not exceed 256 characters in length. 
- maxLength: 510 - minLength: 1 - type: string - x-kubernetes-validations: - - message: key must contain the '/' character - rule: self.contains('/') - - message: the domain of the key must consist of only - lower case alphanumeric characters, '-' or '.', - and must start and end with an alphanumeric character - rule: self.split('/', 2)[0].matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") - - message: the domain of the key must not exceed 253 - characters in length - rule: self.split('/', 2)[0].size() <= 253 - - message: the domain 'kubernetes.io' is reserved - for Kubernetes use - rule: self.split('/', 2)[0] != 'kubernetes.io' - - message: the subdomains '*.kubernetes.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.kubernetes.io'')' - - message: the domain 'k8s.io' is reserved for Kubernetes - use - rule: self.split('/', 2)[0] != 'k8s.io' - - message: the subdomains '*.k8s.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.k8s.io'')' - - message: the domain 'openshift.io' is reserved for - OpenShift use - rule: self.split('/', 2)[0] != 'openshift.io' - - message: the subdomains '*.openshift.io' are reserved - for OpenShift use - rule: '!self.split(''/'', 2)[0].endsWith(''.openshift.io'')' - - message: the path of the key must not be empty and - must consist of at least one alphanumeric character, - percent-encoded octets, apostrophe, '-', '.', - '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', - ';', '=', and ':' - rule: self.split('/', 2)[1].matches('[A-Za-z0-9/\\-._~%!$&\'()*+;=:]+') - - message: the path of the key must not exceed 256 - characters in length - rule: self.split('/', 2)[1].size() <= 256 - valueExpression: - description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. - valueExpression must produce a string or string array value. - "", [], and null are treated as the extra mapping not being present. - Empty string values within an array are filtered out. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - valueExpression must not exceed 4096 characters in length. - valueExpression must not be empty. - maxLength: 4096 - minLength: 1 - type: string - required: - - key - - valueExpression - type: object - maxItems: 64 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. 
- - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - uid: - description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. - - When using uid.claim to specify the claim it must be a single string value. - When using uid.expression the expression must result in a single string value. - - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. - The current default is to use the 'sub' claim. - properties: - claim: - description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. - - Precisely one of claim or expression must be set. - claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - expression: - description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - Precisely one of claim or expression must be set. - expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: precisely one of claim or expression must be - set - rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. 
- - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. - items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. 
- At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. - - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. 
- - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. 
- maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-Default.crd.yaml deleted file mode 100644 index 687cdc83e..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-Default.crd.yaml +++ /dev/null @@ -1,706 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: Default - name: authentications.config.openshift.io -spec: 
- group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. - items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. 
- type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. - - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. - - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. 
- items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. - At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. 
- - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. 
- items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. - - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
- format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). 
- minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-DevPreviewNoUpgrade.crd.yaml deleted file mode 100644 index d828678e9..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-DevPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,857 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: DevPreviewNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. - items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - extra: - description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. - When omitted, no extra attributes will be present on the cluster identity. - key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. - items: - description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. - properties: - key: - description: |- - key is a required field that specifies the string - to use as the extra attribute key. - - key must be a domain-prefix path (e.g 'example.org/foo'). - key must not exceed 510 characters in length. - key must contain the '/' character, separating the domain and path characters. - key must not be empty. - - The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. - It must not exceed 253 characters in length. - It must start and end with an alphanumeric character. - It must only contain lower case alphanumeric characters and '-' or '.'. - It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. - It must not exceed 256 characters in length. 
- maxLength: 510 - minLength: 1 - type: string - x-kubernetes-validations: - - message: key must contain the '/' character - rule: self.contains('/') - - message: the domain of the key must consist of only - lower case alphanumeric characters, '-' or '.', - and must start and end with an alphanumeric character - rule: self.split('/', 2)[0].matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") - - message: the domain of the key must not exceed 253 - characters in length - rule: self.split('/', 2)[0].size() <= 253 - - message: the domain 'kubernetes.io' is reserved - for Kubernetes use - rule: self.split('/', 2)[0] != 'kubernetes.io' - - message: the subdomains '*.kubernetes.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.kubernetes.io'')' - - message: the domain 'k8s.io' is reserved for Kubernetes - use - rule: self.split('/', 2)[0] != 'k8s.io' - - message: the subdomains '*.k8s.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.k8s.io'')' - - message: the domain 'openshift.io' is reserved for - OpenShift use - rule: self.split('/', 2)[0] != 'openshift.io' - - message: the subdomains '*.openshift.io' are reserved - for OpenShift use - rule: '!self.split(''/'', 2)[0].endsWith(''.openshift.io'')' - - message: the path of the key must not be empty and - must consist of at least one alphanumeric character, - percent-encoded octets, apostrophe, '-', '.', - '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', - ';', '=', and ':' - rule: self.split('/', 2)[1].matches('[A-Za-z0-9/\\-._~%!$&\'()*+;=:]+') - - message: the path of the key must not exceed 256 - characters in length - rule: self.split('/', 2)[1].size() <= 256 - valueExpression: - description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. - valueExpression must produce a string or string array value. - "", [], and null are treated as the extra mapping not being present. - Empty string values within an array are filtered out. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - valueExpression must not exceed 4096 characters in length. - valueExpression must not be empty. - maxLength: 4096 - minLength: 1 - type: string - required: - - key - - valueExpression - type: object - maxItems: 64 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. 
- - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - uid: - description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. - - When using uid.claim to specify the claim it must be a single string value. - When using uid.expression the expression must result in a single string value. - - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. - The current default is to use the 'sub' claim. - properties: - claim: - description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. - - Precisely one of claim or expression must be set. - claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - expression: - description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - Precisely one of claim or expression must be set. - expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: precisely one of claim or expression must be - set - rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. 
- - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. - items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. 
- At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. - - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. 
- - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. 
- maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index c117ad886..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,857 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade 
- name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. - items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - extra: - description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. - When omitted, no extra attributes will be present on the cluster identity. - key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. - items: - description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. - properties: - key: - description: |- - key is a required field that specifies the string - to use as the extra attribute key. 
- - key must be a domain-prefix path (e.g 'example.org/foo'). - key must not exceed 510 characters in length. - key must contain the '/' character, separating the domain and path characters. - key must not be empty. - - The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. - It must not exceed 253 characters in length. - It must start and end with an alphanumeric character. - It must only contain lower case alphanumeric characters and '-' or '.'. - It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. - It must not exceed 256 characters in length. - maxLength: 510 - minLength: 1 - type: string - x-kubernetes-validations: - - message: key must contain the '/' character - rule: self.contains('/') - - message: the domain of the key must consist of only - lower case alphanumeric characters, '-' or '.', - and must start and end with an alphanumeric character - rule: self.split('/', 2)[0].matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") - - message: the domain of the key must not exceed 253 - characters in length - rule: self.split('/', 2)[0].size() <= 253 - - message: the domain 'kubernetes.io' is reserved - for Kubernetes use - rule: self.split('/', 2)[0] != 'kubernetes.io' - - message: the subdomains '*.kubernetes.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.kubernetes.io'')' - - message: the domain 'k8s.io' is reserved for Kubernetes - use - rule: self.split('/', 2)[0] != 'k8s.io' - - message: the subdomains '*.k8s.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.k8s.io'')' - - message: the domain 'openshift.io' is reserved for - OpenShift use - rule: self.split('/', 2)[0] != 'openshift.io' - - message: the subdomains '*.openshift.io' are reserved - for OpenShift use - rule: '!self.split(''/'', 2)[0].endsWith(''.openshift.io'')' - - message: the path of the key must not be empty and - must consist of at least one alphanumeric character, - percent-encoded octets, apostrophe, '-', '.', - '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', - ';', '=', and ':' - rule: self.split('/', 2)[1].matches('[A-Za-z0-9/\\-._~%!$&\'()*+;=:]+') - - message: the path of the key must not exceed 256 - characters in length - rule: self.split('/', 2)[1].size() <= 256 - valueExpression: - description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. - valueExpression must produce a string or string array value. - "", [], and null are treated as the extra mapping not being present. - Empty string values within an array are filtered out. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - valueExpression must not exceed 4096 characters in length. - valueExpression must not be empty. 
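# --- Editor's illustrative sketch (not part of the vendored manifest above and
# not applied by this patch): one `extra` claim-mapping entry that satisfies the
# key and valueExpression rules described in this schema. The domain
# `example.org`, the path `user-department`, and the claim `department` are
# hypothetical.
# extra:
#   - key: example.org/user-department     # domain-prefixed path; not a reserved kubernetes.io/k8s.io/openshift.io domain
#     valueExpression: claims.department   # CEL over the token claims; must yield a string or string array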
- maxLength: 4096 - minLength: 1 - type: string - required: - - key - - valueExpression - type: object - maxItems: 64 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. - - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - uid: - description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. - - When using uid.claim to specify the claim it must be a single string value. - When using uid.expression the expression must result in a single string value. - - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. - The current default is to use the 'sub' claim. - properties: - claim: - description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. - - Precisely one of claim or expression must be set. - claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - expression: - description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - Precisely one of claim or expression must be set. - expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: precisely one of claim or expression must be - set - rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. 
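# --- Editor's illustrative sketch (not part of the vendored manifest above): a
# `uid` mapping using a CEL expression. Per the schema, exactly one of `claim`
# or `expression` may be set; the expression shown is a hypothetical choice.
# uid:
#   expression: claims.sub   # must produce a single string value
# Equivalent claim-based form (mutually exclusive with `expression`):
# uid:
#   claim: sub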
- properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. - - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. - items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). 
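# --- Editor's illustrative sketch (not part of the vendored manifest above): a
# `username` mapping using prefixPolicy 'Prefix', plus one claimValidationRules
# entry. The prefix string and the required claim/value pair are hypothetical.
# username:
#   claim: preferred_username
#   prefixPolicy: Prefix            # when 'Prefix', the `prefix` object must be set
#   prefix:
#     prefixString: "myoidc:"       # prepended to the claim value, e.g. "myoidc:userA"
# claimValidationRules:
#   - type: RequiredClaim           # the only supported type; also the default
#     requiredClaim:
#       claim: environment          # must be a string-valued claim in the token
#       requiredValue: production   # token is rejected if the claim value differs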
- minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. - At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. - - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. 
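# --- Editor's illustrative sketch (not part of the vendored manifest above): an
# `issuer` block matching the constraints described in this schema. The URL,
# audience, and ConfigMap name are hypothetical; the referenced ConfigMap would
# live in openshift-config with its CA bundle under the 'ca-bundle.crt' key.
# issuer:
#   issuerURL: https://oidc.example.org   # must use the https scheme; matched against the token's 'iss' claim
#   audiences:                            # 1-10 entries; at least one must match the token's 'aud' claim
#     - openshift-console
#   issuerCertificateAuthority:
#     name: oidc-ca-bundle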
- The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. 
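# --- Editor's illustrative sketch (not part of the vendored manifest above):
# how the pieces described in this schema compose into a minimal Authentication
# spec with an external OIDC provider and one platform client. All names, URLs,
# and the secret reference are hypothetical.
# apiVersion: config.openshift.io/v1
# kind: Authentication
# metadata:
#   name: cluster                              # canonical singleton name
# spec:
#   type: OIDC                                 # oidcProviders may only be set when type is OIDC
#   oidcProviders:
#     - name: my-oidc-provider
#       issuer:
#         issuerURL: https://oidc.example.org
#         audiences: [openshift-console]
#       claimMappings:
#         username:
#           claim: email
#       oidcClients:
#         - componentName: console
#           componentNamespace: openshift-console
#           clientID: openshift-console-client
#           clientSecret:
#             name: console-oidc-secret        # Secret in openshift-config, key 'clientSecret'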
- Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. - - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. 
- properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. - maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). 
- minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml deleted file mode 100644 index be25dac52..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml +++ /dev/null @@ -1,857 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: CustomNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. - items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - extra: - description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. - When omitted, no extra attributes will be present on the cluster identity. - key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. - items: - description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. - properties: - key: - description: |- - key is a required field that specifies the string - to use as the extra attribute key. - - key must be a domain-prefix path (e.g 'example.org/foo'). - key must not exceed 510 characters in length. - key must contain the '/' character, separating the domain and path characters. - key must not be empty. - - The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. - It must not exceed 253 characters in length. - It must start and end with an alphanumeric character. - It must only contain lower case alphanumeric characters and '-' or '.'. - It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. - It must not exceed 256 characters in length. 
- maxLength: 510 - minLength: 1 - type: string - x-kubernetes-validations: - - message: key must contain the '/' character - rule: self.contains('/') - - message: the domain of the key must consist of only - lower case alphanumeric characters, '-' or '.', - and must start and end with an alphanumeric character - rule: self.split('/', 2)[0].matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") - - message: the domain of the key must not exceed 253 - characters in length - rule: self.split('/', 2)[0].size() <= 253 - - message: the domain 'kubernetes.io' is reserved - for Kubernetes use - rule: self.split('/', 2)[0] != 'kubernetes.io' - - message: the subdomains '*.kubernetes.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.kubernetes.io'')' - - message: the domain 'k8s.io' is reserved for Kubernetes - use - rule: self.split('/', 2)[0] != 'k8s.io' - - message: the subdomains '*.k8s.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.k8s.io'')' - - message: the domain 'openshift.io' is reserved for - OpenShift use - rule: self.split('/', 2)[0] != 'openshift.io' - - message: the subdomains '*.openshift.io' are reserved - for OpenShift use - rule: '!self.split(''/'', 2)[0].endsWith(''.openshift.io'')' - - message: the path of the key must not be empty and - must consist of at least one alphanumeric character, - percent-encoded octets, apostrophe, '-', '.', - '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', - ';', '=', and ':' - rule: self.split('/', 2)[1].matches('[A-Za-z0-9/\\-._~%!$&\'()*+;=:]+') - - message: the path of the key must not exceed 256 - characters in length - rule: self.split('/', 2)[1].size() <= 256 - valueExpression: - description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. - valueExpression must produce a string or string array value. - "", [], and null are treated as the extra mapping not being present. - Empty string values within an array are filtered out. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - valueExpression must not exceed 4096 characters in length. - valueExpression must not be empty. - maxLength: 4096 - minLength: 1 - type: string - required: - - key - - valueExpression - type: object - maxItems: 64 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. 
- - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - uid: - description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. - - When using uid.claim to specify the claim it must be a single string value. - When using uid.expression the expression must result in a single string value. - - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. - The current default is to use the 'sub' claim. - properties: - claim: - description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. - - Precisely one of claim or expression must be set. - claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - expression: - description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - Precisely one of claim or expression must be set. - expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: precisely one of claim or expression must be - set - rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. 
- - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. - items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. 
- At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. - - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. 
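# --- Editor's illustrative sketch (not part of the vendored manifest above):
# the spec shape for the webhook token authenticator described in this schema.
# Per the description it may only be set when `type` is "None"; the secret name
# is hypothetical and the secret must live in openshift-config with its
# kubeconfig data under the "kubeConfig" key.
# spec:
#   type: None
#   webhookTokenAuthenticator:
#     kubeConfig:
#       name: webhook-authenticator-kubeconfig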
- - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. 
- maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-Default.crd.yaml deleted file mode 100644 index 597965355..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-Default.crd.yaml +++ /dev/null @@ -1,187 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: Default - name: 
authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. 
This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. - - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. 
- properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 9ea5ab87b..000000000 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,857 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/470 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: authentications.config.openshift.io -spec: - group: config.openshift.io - names: - kind: Authentication - listKind: AuthenticationList - plural: authentications - singular: authentication - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: |- - Authentication specifies cluster-wide settings for authentication (like OAuth and - webhook token authenticators). The canonical name of an instance is `cluster`. - - Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - oauthMetadata: - description: |- - oauthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for an external OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - If oauthMetadata.name is non-empty, this value has precedence - over any metadata reference stored in status. - The key "oauthMetadata" is used to locate the data. - If specified and the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config. 
- properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcProviders: - description: |- - oidcProviders are OIDC identity providers that can issue tokens - for this cluster - Can only be set if "Type" is set to "OIDC". - - At most one provider can be configured. - items: - properties: - claimMappings: - description: |- - claimMappings is a required field that configures the rules to be used by - the Kubernetes API server for translating claims in a JWT token, issued - by the identity provider, to a cluster identity. - properties: - extra: - description: |- - extra is an optional field for configuring the mappings - used to construct the extra attribute for the cluster identity. - When omitted, no extra attributes will be present on the cluster identity. - key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. - items: - description: |- - ExtraMapping allows specifying a key and CEL expression - to evaluate the keys' value. It is used to create additional - mappings and attributes added to a cluster identity from - a provided authentication token. - properties: - key: - description: |- - key is a required field that specifies the string - to use as the extra attribute key. - - key must be a domain-prefix path (e.g 'example.org/foo'). - key must not exceed 510 characters in length. - key must contain the '/' character, separating the domain and path characters. - key must not be empty. - - The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. - It must not exceed 253 characters in length. - It must start and end with an alphanumeric character. - It must only contain lower case alphanumeric characters and '-' or '.'. - It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io". - - The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one - alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. - It must not exceed 256 characters in length. 
- maxLength: 510 - minLength: 1 - type: string - x-kubernetes-validations: - - message: key must contain the '/' character - rule: self.contains('/') - - message: the domain of the key must consist of only - lower case alphanumeric characters, '-' or '.', - and must start and end with an alphanumeric character - rule: self.split('/', 2)[0].matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") - - message: the domain of the key must not exceed 253 - characters in length - rule: self.split('/', 2)[0].size() <= 253 - - message: the domain 'kubernetes.io' is reserved - for Kubernetes use - rule: self.split('/', 2)[0] != 'kubernetes.io' - - message: the subdomains '*.kubernetes.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.kubernetes.io'')' - - message: the domain 'k8s.io' is reserved for Kubernetes - use - rule: self.split('/', 2)[0] != 'k8s.io' - - message: the subdomains '*.k8s.io' are reserved - for Kubernetes use - rule: '!self.split(''/'', 2)[0].endsWith(''.k8s.io'')' - - message: the domain 'openshift.io' is reserved for - OpenShift use - rule: self.split('/', 2)[0] != 'openshift.io' - - message: the subdomains '*.openshift.io' are reserved - for OpenShift use - rule: '!self.split(''/'', 2)[0].endsWith(''.openshift.io'')' - - message: the path of the key must not be empty and - must consist of at least one alphanumeric character, - percent-encoded octets, apostrophe, '-', '.', - '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', - ';', '=', and ':' - rule: self.split('/', 2)[1].matches('[A-Za-z0-9/\\-._~%!$&\'()*+;=:]+') - - message: the path of the key must not exceed 256 - characters in length - rule: self.split('/', 2)[1].size() <= 256 - valueExpression: - description: |- - valueExpression is a required field to specify the CEL expression to extract - the extra attribute value from a JWT token's claims. - valueExpression must produce a string or string array value. - "", [], and null are treated as the extra mapping not being present. - Empty string values within an array are filtered out. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - valueExpression must not exceed 4096 characters in length. - valueExpression must not be empty. - maxLength: 4096 - minLength: 1 - type: string - required: - - key - - valueExpression - type: object - maxItems: 64 - type: array - x-kubernetes-list-map-keys: - - key - x-kubernetes-list-type: map - groups: - description: |- - groups is an optional field that configures how the groups of a cluster identity - should be constructed from the claims in a JWT token issued - by the identity provider. - When referencing a claim, if the claim is present in the JWT - token, its value must be a list of groups separated by a comma (','). - For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - type: string - prefix: - description: |- - prefix is an optional field that configures the prefix that will be - applied to the cluster identity attribute during the process of mapping - JWT claims to cluster identity attributes. 
- - When omitted (""), no prefix is applied to the cluster identity attribute. - - Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains - an array of strings "a", "b" and "c", the mapping will result in an - array of string "myoidc:a", "myoidc:b" and "myoidc:c". - type: string - required: - - claim - type: object - uid: - description: |- - uid is an optional field for configuring the claim mapping - used to construct the uid for the cluster identity. - - When using uid.claim to specify the claim it must be a single string value. - When using uid.expression the expression must result in a single string value. - - When omitted, this means the user has no opinion and the platform - is left to choose a default, which is subject to change over time. - The current default is to use the 'sub' claim. - properties: - claim: - description: |- - claim is an optional field for specifying the - JWT token claim that is used in the mapping. - The value of this claim will be assigned to - the field in which this mapping is associated. - - Precisely one of claim or expression must be set. - claim must not be specified when expression is set. - When specified, claim must be at least 1 character in length - and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - expression: - description: |- - expression is an optional field for specifying a - CEL expression that produces a string value from - JWT token claims. - - CEL expressions have access to the token claims - through a CEL variable, 'claims'. - 'claims' is a map of claim names to claim values. - For example, the 'sub' claim value can be accessed as 'claims.sub'. - Nested claims can be accessed using dot notation ('claims.foo.bar'). - - Precisely one of claim or expression must be set. - expression must not be specified when claim is set. - When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 - minLength: 1 - type: string - type: object - x-kubernetes-validations: - - message: precisely one of claim or expression must be - set - rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' - username: - description: |- - username is a required field that configures how the username of a cluster identity - should be constructed from the claims in a JWT token issued by the identity provider. - properties: - claim: - description: |- - claim is a required field that configures the JWT token - claim whose value is assigned to the cluster identity - field associated with this mapping. - - claim must not be an empty string ("") and must not exceed 256 characters. - maxLength: 256 - minLength: 1 - type: string - prefix: - description: |- - prefix configures the prefix that should be prepended to the value - of the JWT claim. - - prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise. - properties: - prefixString: - description: |- - prefixString is a required field that configures the prefix that will - be applied to cluster identity username attribute - during the process of mapping JWT claims to cluster identity attributes. - - prefixString must not be an empty string (""). - minLength: 1 - type: string - required: - - prefixString - type: object - prefixPolicy: - description: |- - prefixPolicy is an optional field that configures how a prefix should be - applied to the value of the JWT claim specified in the 'claim' field. 
- - Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). - - When set to 'Prefix', the value specified in the prefix field will be - prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - - When set to 'NoPrefix', no prefix will be prepended to the value - of the JWT claim. - - When omitted, this means no opinion and the platform is left to choose - any prefixes that are applied which is subject to change over time. - Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim - when the claim is not 'email'. - As an example, consider the following scenario: - `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - and `claim` is set to: - - "username": the mapped value will be "https://myoidc.tld#userA" - - "email": the mapped value will be "userA@myoidc.tld" - enum: - - "" - - NoPrefix - - Prefix - type: string - required: - - claim - type: object - x-kubernetes-validations: - - message: prefix must be set if prefixPolicy is 'Prefix', - but must remain unset otherwise - rule: 'has(self.prefixPolicy) && self.prefixPolicy == - ''Prefix'' ? (has(self.prefix) && size(self.prefix.prefixString) - > 0) : !has(self.prefix)' - required: - - username - type: object - claimValidationRules: - description: |- - claimValidationRules is an optional field that configures the rules to - be used by the Kubernetes API server for validating the claims in a JWT - token issued by the identity provider. - - Validation rules are joined via an AND operation. - items: - properties: - requiredClaim: - description: |- - requiredClaim is an optional field that configures the required claim - and value that the Kubernetes API server will use to validate if an incoming - JWT is valid for this identity provider. - properties: - claim: - description: |- - claim is a required field that configures the name of the required claim. - When taken from the JWT claims, claim must be a string value. - - claim must not be an empty string (""). - minLength: 1 - type: string - requiredValue: - description: |- - requiredValue is a required field that configures the value that 'claim' must - have when taken from the incoming JWT claims. - If the value in the JWT claims does not match, the token - will be rejected for authentication. - - requiredValue must not be an empty string (""). - minLength: 1 - type: string - required: - - claim - - requiredValue - type: object - type: - default: RequiredClaim - description: |- - type is an optional field that configures the type of the validation rule. - - Allowed values are 'RequiredClaim' and omitted (not provided or an empty string). - - When set to 'RequiredClaim', the Kubernetes API server - will be configured to validate that the incoming JWT - contains the required claim and that its value matches - the required value. - - Defaults to 'RequiredClaim'. - enum: - - RequiredClaim - type: string - type: object - type: array - x-kubernetes-list-type: atomic - issuer: - description: |- - issuer is a required field that configures how the platform interacts - with the identity provider and how tokens issued from the identity provider - are evaluated by the Kubernetes API server. - properties: - audiences: - description: |- - audiences is a required field that configures the acceptable audiences - the JWT token, issued by the identity provider, must be issued to. 
- At least one of the entries must match the 'aud' claim in the JWT token. - - audiences must contain at least one entry and must not exceed ten entries. - items: - minLength: 1 - type: string - maxItems: 10 - minItems: 1 - type: array - x-kubernetes-list-type: set - issuerCertificateAuthority: - description: |- - issuerCertificateAuthority is an optional field that configures the - certificate authority, used by the Kubernetes API server, to validate - the connection to the identity provider when fetching discovery information. - - When not specified, the system trust is used. - - When specified, it must reference a ConfigMap in the openshift-config - namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' - key in the data field of the ConfigMap. - properties: - name: - description: name is the metadata.name of the referenced - config map - type: string - required: - - name - type: object - issuerURL: - description: |- - issuerURL is a required field that configures the URL used to issue tokens - by the identity provider. - The Kubernetes API server determines how authentication tokens should be handled - by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - required: - - audiences - - issuerURL - type: object - name: - description: |- - name is a required field that configures the unique human-readable identifier - associated with the identity provider. - It is used to distinguish between multiple identity providers - and has no impact on token validation or authentication mechanics. - - name must not be an empty string (""). - minLength: 1 - type: string - oidcClients: - description: |- - oidcClients is an optional field that configures how on-cluster, - platform clients should request tokens from the identity provider. - oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. - items: - description: |- - OIDCClientConfig configures how platform clients - interact with identity providers as an authentication - method - properties: - clientID: - description: |- - clientID is a required field that configures the client identifier, from - the identity provider, that the platform component uses for authentication - requests made to the identity provider. - The identity provider must accept this identifier for platform components - to be able to use the identity provider as an authentication mode. - - clientID must not be an empty string (""). - minLength: 1 - type: string - clientSecret: - description: |- - clientSecret is an optional field that configures the client secret used - by the platform component when making authentication requests to the identity provider. - - When not specified, no client secret will be used when making authentication requests - to the identity provider. - - When specified, clientSecret references a Secret in the 'openshift-config' - namespace that contains the client secret in the 'clientSecret' key of the '.data' field. - The client secret will be used when making authentication requests to the identity provider. - - Public clients do not require a client secret but private - clients do require a client secret to work with the identity provider. 
- properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component being configured to use the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. - maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component being configured to use the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - extraScopes: - description: |- - extraScopes is an optional field that configures the extra scopes that should - be requested by the platform component when making authentication requests to the - identity provider. - This is useful if you have configured claim mappings that requires specific - scopes to be requested beyond the standard OIDC scopes. - - When omitted, no additional scopes are requested. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - clientID - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - required: - - claimMappings - - issuer - - name - type: object - maxItems: 1 - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - serviceAccountIssuer: - description: |- - serviceAccountIssuer is the identifier of the bound service account token - issuer. - The default is https://kubernetes.default.svc - WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the - previous issuer value. Instead, the tokens issued by previous service account issuer will continue to - be trusted for a time period chosen by the platform (currently set to 24h). - This time period is subject to change over time. - This allows internal components to transition to use new service account issuer without service distruption. - type: string - type: - description: |- - type identifies the cluster managed, user facing authentication mode in use. - Specifically, it manages the component that responds to login attempts. - The default is IntegratedOAuth. - enum: - - "" - - None - - IntegratedOAuth - - OIDC - type: string - webhookTokenAuthenticator: - description: |- - webhookTokenAuthenticator configures a remote token reviewer. - These remote authentication webhooks can be used to verify bearer tokens - via the tokenreviews.authentication.k8s.io REST API. This is required to - honor bearer tokens that are provisioned by an external authentication service. - - Can only be set if "Type" is set to "None". - properties: - kubeConfig: - description: |- - kubeConfig references a secret that contains kube config file data which - describes how to access the remote webhook service. - The namespace for the referenced secret is openshift-config. 
- - For further details, see: - - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - properties: - name: - description: name is the metadata.name of the referenced secret - type: string - required: - - name - type: object - required: - - kubeConfig - type: object - webhookTokenAuthenticators: - description: webhookTokenAuthenticators is DEPRECATED, setting it - has no effect. - items: - description: |- - deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. - It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. - properties: - kubeConfig: - description: |- - kubeConfig contains kube config file data which describes how to access the remote webhook service. - For further details, see: - https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication - The key "kubeConfig" is used to locate the data. - If the secret or expected key is not found, the webhook is not honored. - If the specified kube config data is not valid, the webhook is not honored. - The namespace for this secret is determined by the point of use. - properties: - name: - description: name is the metadata.name of the referenced - secret - type: string - required: - - name - type: object - type: object - type: array - x-kubernetes-list-type: atomic - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - integratedOAuthMetadata: - description: |- - integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 - Authorization Server Metadata for the in-cluster integrated OAuth server. - This discovery document can be viewed from its served location: - oc get --raw '/.well-known/oauth-authorization-server' - For further details, see the IETF Draft: - https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 - This contains the observed value based on cluster state. - An explicitly set value in spec.oauthMetadata has precedence over this field. - This field has no meaning if authentication spec.type is not set to IntegratedOAuth. - The key "oauthMetadata" is used to locate the data. - If the config map or expected key is not found, no metadata is served. - If the specified metadata is not valid, no metadata is served. - The namespace for this config map is openshift-config-managed. - properties: - name: - description: name is the metadata.name of the referenced config - map - type: string - required: - - name - type: object - oidcClients: - description: |- - oidcClients is where participating operators place the current OIDC client status - for OIDC clients that can be customized by the cluster-admin. - items: - description: |- - OIDCClientStatus represents the current state - of platform components and how they interact with - the configured identity providers. - properties: - componentName: - description: |- - componentName is a required field that specifies the name of the platform - component using the identity provider as an authentication mode. - It is used in combination with componentNamespace as a unique identifier. - - componentName must not be an empty string ("") and must not exceed 256 characters in length. 
- maxLength: 256 - minLength: 1 - type: string - componentNamespace: - description: |- - componentNamespace is a required field that specifies the namespace in which the - platform component using the identity provider as an authentication - mode is running. - It is used in combination with componentName as a unique identifier. - - componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. - maxLength: 63 - minLength: 1 - type: string - conditions: - description: |- - conditions are used to communicate the state of the `oidcClients` entry. - - Supported conditions include Available, Degraded and Progressing. - - If Available is true, the component is successfully using the configured client. - If Degraded is true, that means something has gone wrong trying to handle the client configuration. - If Progressing is true, that means the component is taking some action related to the `oidcClients` entry. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - consumingUsers: - description: |- - consumingUsers is an optional list of ServiceAccounts requiring - read permissions on the `clientSecret` secret. - - consumingUsers must not exceed 5 entries. - items: - description: ConsumingUser is an alias for string which we - add validation to. Currently only service accounts are supported. 
- maxLength: 512 - minLength: 1 - pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - maxItems: 5 - type: array - x-kubernetes-list-type: set - currentOIDCClients: - description: |- - currentOIDCClients is an optional list of clients that the component is currently using. - Entries must have unique issuerURL/clientID pairs. - items: - description: |- - OIDCClientReference is a reference to a platform component - client configuration. - properties: - clientID: - description: |- - clientID is a required field that specifies the client identifier, from - the identity provider, that the platform component is using for authentication - requests made to the identity provider. - - clientID must not be empty. - minLength: 1 - type: string - issuerURL: - description: |- - issuerURL is a required field that specifies the URL of the identity - provider that this client is configured to make requests against. - - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] - type: string - oidcProviderName: - description: |- - oidcProviderName is a required reference to the 'name' of the identity provider - configured in 'oidcProviders' that this client is associated with. - - oidcProviderName must not be an empty string (""). - minLength: 1 - type: string - required: - - clientID - - issuerURL - - oidcProviderName - type: object - type: array - x-kubernetes-list-map-keys: - - issuerURL - - clientID - x-kubernetes-list-type: map - required: - - componentName - - componentNamespace - type: object - maxItems: 20 - type: array - x-kubernetes-list-map-keys: - - componentNamespace - - componentName - x-kubernetes-list-type: map - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: all oidcClients in the oidcProviders must match their componentName - and componentNamespace to either a previously configured oidcClient or - they must exist in the status.oidcClients - rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) - || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace - == specC.componentNamespace && statusC.componentName == specC.componentName) - || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, - oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, - oldC.componentNamespace == specC.componentNamespace && oldC.componentName - == specC.componentName)))))' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications.crd.yaml similarity index 97% rename from vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml rename to vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications.crd.yaml index 5b604bbd2..d6e1cf084 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications.crd.yaml @@ -4,9 
+4,9 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/470 api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/bootstrap-required: "true" - release.openshift.io/feature-set: DevPreviewNoUpgrade name: authentications.config.openshift.io spec: group: config.openshift.io @@ -89,7 +89,7 @@ spec: used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. - A maximum of 64 extra attribute mappings may be provided. + A maximum of 32 extra attribute mappings may be provided. items: description: |- ExtraMapping allows specifying a key and CEL expression @@ -170,16 +170,16 @@ spec: For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar'). - valueExpression must not exceed 4096 characters in length. + valueExpression must not exceed 1024 characters in length. valueExpression must not be empty. - maxLength: 4096 + maxLength: 1024 minLength: 1 type: string required: - key - valueExpression type: object - maxItems: 64 + maxItems: 32 type: array x-kubernetes-list-map-keys: - key @@ -255,8 +255,8 @@ spec: Precisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length - and must not exceed 4096 characters in length. - maxLength: 4096 + and must not exceed 1024 characters in length. + maxLength: 1024 minLength: 1 type: string type: object @@ -441,9 +441,22 @@ spec: The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. - issuerURL must use the 'https' scheme. - pattern: ^https:\/\/[^\s] + Must be at least 1 character and must not exceed 512 characters in length. + Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. 
+ maxLength: 512 + minLength: 1 type: string + x-kubernetes-validations: + - message: must be a valid URL + rule: isURL(self) + - message: must use the 'https' scheme + rule: isURL(self) && url(self).getScheme() == 'https' + - message: must not have a query + rule: isURL(self) && url(self).getQuery() == {} + - message: must not have a fragment + rule: self.find('#(.+)$') == '' + - message: must not have user info + rule: self.find('@') == '' required: - audiences - issuerURL diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-Default.crd.yaml new file mode 100644 index 000000000..29dc56153 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-Default.crd.yaml @@ -0,0 +1,415 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2310 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default + name: clusterimagepolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterImagePolicy + listKind: ClusterImagePolicyList + plural: clusterimagepolicies + singular: clusterimagepolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterImagePolicy holds cluster-wide configuration for image signature verification + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the configuration for the cluster image policy. + properties: + policy: + description: |- + policy is a required field that contains configuration to allow scopes to be verified, and defines how + images not matching the verification policy will be treated. + properties: + rootOfTrust: + description: |- + rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. + This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. + properties: + fulcioCAWithRekor: + description: |- + fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. 
+ fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + For more information about Fulcio and Rekor, please refer to the document at: + https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + properties: + fulcioCAData: + description: |- + fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. + fulcioCAData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the fulcioCAData must start with base64 encoding + of '-----BEGIN CERTIFICATE-----'. + rule: string(self).startsWith('-----BEGIN CERTIFICATE-----') + - message: the fulcioCAData must end with base64 encoding + of '-----END CERTIFICATE-----'. + rule: string(self).endsWith('-----END CERTIFICATE-----\n') + || string(self).endsWith('-----END CERTIFICATE-----') + fulcioSubject: + description: fulcioSubject is a required field specifies + OIDC issuer and the email of the Fulcio authentication + configuration. + properties: + oidcIssuer: + description: |- + oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. + It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. + When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. + Example: "https://expected.OIDC.issuer/" + maxLength: 2048 + type: string + x-kubernetes-validations: + - message: oidcIssuer must be a valid URL + rule: isURL(self) + signedEmail: + description: |- + signedEmail is a required field holds the email address that the Fulcio certificate is issued for. + The signedEmail must be a valid email address and at most 320 characters in length. + Example: "expected-signing-user@example.com" + maxLength: 320 + type: string + x-kubernetes-validations: + - message: invalid email address + rule: self.matches('^\\S+@\\S+$') + required: + - oidcIssuer + - signedEmail + type: object + rekorKeyData: + description: |- + rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. + rekorKeyData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the rekorKeyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the rekorKeyData must end with base64 encoding + of '-----END PUBLIC KEY-----'. + rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + required: + - fulcioCAData + - fulcioSubject + - rekorKeyData + type: object + policyType: + description: |- + policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. + Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". + When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. + When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. + When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. 
+ type: string + publicKey: + description: |- + publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + publicKey is required when policyType is PublicKey, and forbidden otherwise. + properties: + keyData: + description: |- + keyData is a required field contains inline base64-encoded data for the PEM format public key. + keyData must be at most 8192 characters. + format: byte + maxLength: 8192 + minLength: 68 + type: string + x-kubernetes-validations: + - message: the keyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the keyData must end with base64 encoding of + '-----END PUBLIC KEY-----'. + rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + rekorKeyData: + description: |- + rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + rekorKeyData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the rekorKeyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the rekorKeyData must end with base64 encoding + of '-----END PUBLIC KEY-----'. + rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + required: + - keyData + type: object + required: + - policyType + type: object + x-kubernetes-validations: + - message: publicKey is required when policyType is PublicKey, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''PublicKey'' + ? has(self.publicKey) : !has(self.publicKey)' + - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' + ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' + signedIdentity: + description: |- + signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. + The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + properties: + exactRepository: + description: |- + exactRepository specifies the repository that must be exactly matched by the identity in the signature. + exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity. + properties: + repository: + description: |- + repository is the reference of the image identity to be matched. + repository is required if matchPolicy is set to "ExactRepository". + The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". 
For example, docker.io/library/busybox + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - repository + type: object + matchPolicy: + description: |- + matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. + Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity. + When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository". + When set to "RemapIdentity", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix. + enum: + - MatchRepoDigestOrExact + - MatchRepository + - ExactRepository + - RemapIdentity + type: string + remapIdentity: + description: |- + remapIdentity specifies the prefix remapping rule for verifying image identity. + remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image. + properties: + prefix: + description: |- + prefix is required if matchPolicy is set to "RemapIdentity". + prefix is the prefix of the image identity to be matched. + If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. + The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. 
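As an illustration of the remapping just described, a signedIdentity using RemapIdentity might look like the following sketch; the mirror and signed prefixes are hypothetical, chosen only to show the shape the schema expects.

  signedIdentity:
    matchPolicy: RemapIdentity
    remapIdentity:
      # hypothetical: images are pulled from an internal mirror but signed
      # against the vendor's original repository namespace
      prefix: mirror.internal.example/openshift
      signedPrefix: quay.io/openshift-release-dev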
+ maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + signedPrefix: + description: |- + signedPrefix is required if matchPolicy is set to "RemapIdentity". + signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - prefix + - signedPrefix + type: object + required: + - matchPolicy + type: object + x-kubernetes-validations: + - message: exactRepository is required when matchPolicy is ExactRepository, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') + ? has(self.exactRepository) : !has(self.exactRepository)' + - message: remapIdentity is required when matchPolicy is RemapIdentity, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') + ? has(self.remapIdentity) : !has(self.remapIdentity)' + required: + - rootOfTrust + type: object + scopes: + description: |- + scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). 
+ More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + For additional details about the format, please refer to the document explaining the docker transport field, + which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + items: + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid image scope format, scope must contain a fully + qualified domain name or 'localhost' + rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] + == ''localhost'' : true' + - message: invalid image scope with wildcard, a wildcard can only + be at the start of the domain and is only supported for subdomain + matching, not path matching + rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') + : true' + - message: invalid repository namespace or image specification in + the image scope + rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') + : true' + maxItems: 256 + type: array + x-kubernetes-list-type: set + required: + - policy + - scopes + type: object + status: + description: status contains the observed state of the resource. + properties: + conditions: + description: conditions provide details on the status of this API + Resource. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-Default.crd.yaml new file mode 100644 index 000000000..ee88c398e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-Default.crd.yaml @@ -0,0 +1,416 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2310 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default + name: imagepolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImagePolicy + listKind: ImagePolicyList + plural: imagepolicies + singular: imagepolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ImagePolicy holds namespace-wide configuration for image signature verification + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + policy: + description: |- + policy is a required field that contains configuration to allow scopes to be verified, and defines how + images not matching the verification policy will be treated. + properties: + rootOfTrust: + description: |- + rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. + This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated. + properties: + fulcioCAWithRekor: + description: |- + fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. + fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise + For more information about Fulcio and Rekor, please refer to the document at: + https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor + properties: + fulcioCAData: + description: |- + fulcioCAData is a required field contains inline base64-encoded data for the PEM format fulcio CA. + fulcioCAData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the fulcioCAData must start with base64 encoding + of '-----BEGIN CERTIFICATE-----'. + rule: string(self).startsWith('-----BEGIN CERTIFICATE-----') + - message: the fulcioCAData must end with base64 encoding + of '-----END CERTIFICATE-----'. + rule: string(self).endsWith('-----END CERTIFICATE-----\n') + || string(self).endsWith('-----END CERTIFICATE-----') + fulcioSubject: + description: fulcioSubject is a required field specifies + OIDC issuer and the email of the Fulcio authentication + configuration. + properties: + oidcIssuer: + description: |- + oidcIssuer is a required filed contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. + It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. + When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. + Example: "https://expected.OIDC.issuer/" + maxLength: 2048 + type: string + x-kubernetes-validations: + - message: oidcIssuer must be a valid URL + rule: isURL(self) + signedEmail: + description: |- + signedEmail is a required field holds the email address that the Fulcio certificate is issued for. + The signedEmail must be a valid email address and at most 320 characters in length. + Example: "expected-signing-user@example.com" + maxLength: 320 + type: string + x-kubernetes-validations: + - message: invalid email address + rule: self.matches('^\\S+@\\S+$') + required: + - oidcIssuer + - signedEmail + type: object + rekorKeyData: + description: |- + rekorKeyData is a required field contains inline base64-encoded data for the PEM format from the Rekor public key. + rekorKeyData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the rekorKeyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the rekorKeyData must end with base64 encoding + of '-----END PUBLIC KEY-----'. 
+ rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + required: + - fulcioCAData + - fulcioSubject + - rekorKeyData + type: object + policyType: + description: |- + policyType is a required field specifies the type of the policy for verification. This field must correspond to how the policy was generated. + Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI". + When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. + When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification. + When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate. + type: string + publicKey: + description: |- + publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. + publicKey is required when policyType is PublicKey, and forbidden otherwise. + properties: + keyData: + description: |- + keyData is a required field contains inline base64-encoded data for the PEM format public key. + keyData must be at most 8192 characters. + format: byte + maxLength: 8192 + minLength: 68 + type: string + x-kubernetes-validations: + - message: the keyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the keyData must end with base64 encoding of + '-----END PUBLIC KEY-----'. + rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + rekorKeyData: + description: |- + rekorKeyData is an optional field contains inline base64-encoded data for the PEM format from the Rekor public key. + rekorKeyData must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + x-kubernetes-validations: + - message: the rekorKeyData must start with base64 encoding + of '-----BEGIN PUBLIC KEY-----'. + rule: string(self).startsWith('-----BEGIN PUBLIC KEY-----') + - message: the rekorKeyData must end with base64 encoding + of '-----END PUBLIC KEY-----'. + rule: string(self).endsWith('-----END PUBLIC KEY-----\n') + || string(self).endsWith('-----END PUBLIC KEY-----') + required: + - keyData + type: object + required: + - policyType + type: object + x-kubernetes-validations: + - message: publicKey is required when policyType is PublicKey, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''PublicKey'' + ? has(self.publicKey) : !has(self.publicKey)' + - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' + ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' + signedIdentity: + description: |- + signedIdentity is an optional field specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when mirror registry is configured for the image scope, the signature from the mirror registry contains the image identity of the mirror instead of the original scope. 
+ The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". + properties: + exactRepository: + description: |- + exactRepository specifies the repository that must be exactly matched by the identity in the signature. + exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity. + properties: + repository: + description: |- + repository is the reference of the image identity to be matched. + repository is required if matchPolicy is set to "ExactRepository". + The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - repository + type: object + matchPolicy: + description: |- + matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. + Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity. + When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository". + When set to "RemapIdentity", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix. + enum: + - MatchRepoDigestOrExact + - MatchRepository + - ExactRepository + - RemapIdentity + type: string + remapIdentity: + description: |- + remapIdentity specifies the prefix remapping rule for verifying image identity. + remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image. + properties: + prefix: + description: |- + prefix is required if matchPolicy is set to "RemapIdentity". 
+ prefix is the prefix of the image identity to be matched. + If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used as unchanged and no remapping takes place). + This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure. + The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + signedPrefix: + description: |- + signedPrefix is required if matchPolicy is set to "RemapIdentity". + signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, + or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. + For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity. + The repository or prefix must starts with 'localhost' + or a valid '.' separated domain. If contains registry + paths, the path component names must start with at + least one letter or number, with following parts able + to be separated by one period, one or two underscore + and multiple dashes. + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - prefix + - signedPrefix + type: object + required: + - matchPolicy + type: object + x-kubernetes-validations: + - message: exactRepository is required when matchPolicy is ExactRepository, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') + ? 
has(self.exactRepository) : !has(self.exactRepository)' + - message: remapIdentity is required when matchPolicy is RemapIdentity, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') + ? has(self.remapIdentity) : !has(self.remapIdentity)' + required: + - rootOfTrust + type: object + scopes: + description: |- + scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2". + Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). + More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository + namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). + Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. + This support no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. + In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories + quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. + If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. + For additional details about the format, please refer to the document explaining the docker transport field, + which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker + items: + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid image scope format, scope must contain a fully + qualified domain name or 'localhost' + rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] + == ''localhost'' : true' + - message: invalid image scope with wildcard, a wildcard can only + be at the start of the domain and is only supported for subdomain + matching, not path matching + rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') + : true' + - message: invalid repository namespace or image specification in + the image scope + rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') + : true' + maxItems: 256 + type: array + x-kubernetes-list-type: set + required: + - policy + - scopes + type: object + status: + description: status contains the observed state of the resource. 
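Putting the pieces together, a minimal namespaced ImagePolicy that satisfies the required spec fields above could look roughly like this; the name, namespace, scope, and key material are placeholders rather than values defined by this CRD.

  apiVersion: config.openshift.io/v1
  kind: ImagePolicy
  metadata:
    name: sample-policy            # hypothetical
    namespace: my-namespace        # hypothetical
  spec:
    scopes:
    - registry.example.com/team/app     # hypothetical repository scope
    policy:
      rootOfTrust:
        policyType: PublicKey
        publicKey:
          # placeholder: base64-encoded PEM beginning with '-----BEGIN PUBLIC KEY-----'
          keyData: <base64-encoded sigstore public key>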
+ properties: + conditions: + description: |- + conditions provide details on the status of this API Resource. + condition type 'Pending' indicates that the customer resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 02ae2dcb4..9f01a6aeb 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -229,7 +229,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. 
Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported. @@ -1440,6 +1440,109 @@ spec: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. 
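To make the shape of this stanza concrete, a ClusterHosted configuration admitted by the schema above might look like the sketch below, using documentation (TEST-NET-1) addresses as placeholder load balancer IPs.

  cloudLoadBalancerConfig:
    dnsType: ClusterHosted
    clusterHosted:
      apiLoadBalancerIPs:
      - 192.0.2.10        # placeholder documentation address
      apiIntLoadBalancerIPs:
      - 192.0.2.11
      ingressLoadBalancerIPs:
      - 192.0.2.12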
+ Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' cloudName: description: |- cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK @@ -1892,7 +1995,7 @@ spec: used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. - The maximum number of endpoint overrides allowed is 9. + The maximum number of endpoint overrides allowed is 11. items: description: |- GCPServiceEndpoint store the configuration of a custom url to @@ -1915,8 +2018,11 @@ spec: - DNS - File - IAM + - IAMCredentials + - OAuth - ServiceUsage - Storage + - STS type: string url: description: |- @@ -1942,7 +2048,7 @@ spec: - name - url type: object - maxItems: 8 + maxItems: 11 type: array x-kubernetes-list-map-keys: - name @@ -1989,7 +2095,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. items: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml index 6dcc0cfb6..4ecbc18e9 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml @@ -1051,6 +1051,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1492,6 +1493,110 @@ spec: description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. 
+ nullable: true + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. + Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? 
!has(self.clusterHosted) : true' projectID: description: resourceGroupName is the Project ID for new GCP resources created for the cluster. @@ -1637,7 +1742,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. items: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml index f0e96f9fa..44185f514 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -229,7 +229,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported. @@ -1440,6 +1440,109 @@ spec: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. 
+ These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. + Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' cloudName: description: |- cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK @@ -1892,7 +1995,7 @@ spec: used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. - The maximum number of endpoint overrides allowed is 9. + The maximum number of endpoint overrides allowed is 11. items: description: |- GCPServiceEndpoint store the configuration of a custom url to @@ -1915,8 +2018,11 @@ spec: - DNS - File - IAM + - IAMCredentials + - OAuth - ServiceUsage - Storage + - STS type: string url: description: |- @@ -1942,7 +2048,7 @@ spec: - name - url type: object - maxItems: 8 + maxItems: 11 type: array x-kubernetes-list-map-keys: - name @@ -1989,7 +2095,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. 
The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. items: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index f4d39412d..27e1ce7b4 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -229,7 +229,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported. @@ -1440,6 +1440,109 @@ spec: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. + Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' cloudName: description: |- cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK @@ -1892,7 +1995,7 @@ spec: used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. - The maximum number of endpoint overrides allowed is 9. + The maximum number of endpoint overrides allowed is 11. items: description: |- GCPServiceEndpoint store the configuration of a custom url to @@ -1915,8 +2018,11 @@ spec: - DNS - File - IAM + - IAMCredentials + - OAuth - ServiceUsage - Storage + - STS type: string url: description: |- @@ -1942,7 +2048,7 @@ spec: - name - url type: object - maxItems: 8 + maxItems: 11 type: array x-kubernetes-list-map-keys: - name @@ -1989,7 +2095,7 @@ spec: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been - overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. 
items: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..8e7d3c392 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-CustomNoUpgrade.crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2448 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: insightsdatagathers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: InsightsDataGather + listKind: InsightsDataGatherList + plural: insightsdatagathers + singular: insightsdatagather + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + InsightsDataGather provides data gather configuration options for the Insights Operator. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + gatherConfig: + description: gatherConfig is a required spec attribute that includes + all the configuration options related to gathering of the Insights + data and its uploading to the ingress. + properties: + dataPolicy: + description: |- + dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. + It may not exceed 2 items and must not contain duplicates. + Valid values are ObfuscateNetworking and WorkloadNames. + When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. + When omitted no obfuscation is applied. + items: + description: DataPolicyOption declares valid data policy options + enum: + - ObfuscateNetworking + - WorkloadNames + type: string + maxItems: 2 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: dataPolicy items must be unique + rule: self.all(x, self.exists_one(y, x == y)) + gatherers: + description: gatherers is a required field that specifies the + configuration of the gatherers. 
+ properties: + custom: + description: |- + custom provides gathering configuration. + It is required when mode is Custom, and forbidden otherwise. + Custom configuration allows user to disable only a subset of gatherers. + Gatherers that are not explicitly disabled in custom configuration will run. + properties: + configs: + description: |- + configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + It may not exceed 100 items and each gatherer can be present only once. + It is possible to disable an entire set of gatherers while allowing a specific function within that set. + The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + items: + description: GathererConfig allows to configure specific + gatherers + properties: + name: + description: |- + name is the required name of a specific gatherer. + It may not exceed 256 characters. + The format for a gatherer name is: {gatherer}/{function} where the function is optional. + Gatherer consists of a lowercase letters only that may include underscores (_). + Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: gatherer name must be in the format of + {gatherer}/{function} where the gatherer and + function are lowercase letters only that may + include underscores (_) and are separated by + a forward slash (/) if the function is provided + rule: self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$") + state: + description: |- + state is a required field that allows you to configure specific gatherer. Valid values are "Enabled" and "Disabled". + When set to Enabled the gatherer will run. + When set to Disabled the gatherer will not run. + enum: + - Enabled + - Disabled + type: string + required: + - name + - state + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - configs + type: object + mode: + description: |- + mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + When set to All, all gatherers will run and gather data. + When set to None, all gatherers will be disabled and no data will be gathered. + When set to Custom, the custom configuration from the custom field will be applied. + enum: + - All + - None + - Custom + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: custom is required when mode is Custom, and forbidden + otherwise + rule: 'has(self.mode) && self.mode == ''Custom'' ? has(self.custom) + : !has(self.custom)' + storage: + description: |- + storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + If omitted, the gathering job will use ephemeral storage. 
+ properties: + persistentVolume: + description: |- + persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + The PersistentVolume must be created in the openshift-insights namespace. + properties: + claim: + description: |- + claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + The PersistentVolumeClaim must be created in the openshift-insights namespace. + properties: + name: + description: |- + name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. + It is a string that follows the DNS1123 subdomain format. + It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist + of lower case alphanumeric characters, '-' or + '.', and must start and end with an alphanumeric + character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + mountPath: + description: |- + mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default mount path is /var/lib/insights-operator + The path may not exceed 1024 characters and must not contain a colon. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: mountPath must not contain a colon + rule: '!self.contains('':'')' + required: + - claim + type: object + type: + description: |- + type is a required field that specifies the type of storage that will be used to store the Insights data archive. + Valid values are "PersistentVolume" and "Ephemeral". + When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + enum: + - PersistentVolume + - Ephemeral + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: persistentVolume is required when type is PersistentVolume, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''PersistentVolume'' ? 
has(self.persistentVolume) + : !has(self.persistentVolume)' + required: + - gatherers + type: object + required: + - gatherConfig + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..0ecdc12f2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2448 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: insightsdatagathers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: InsightsDataGather + listKind: InsightsDataGatherList + plural: insightsdatagathers + singular: insightsdatagather + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + InsightsDataGather provides data gather configuration options for the Insights Operator. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + gatherConfig: + description: gatherConfig is a required spec attribute that includes + all the configuration options related to gathering of the Insights + data and its uploading to the ingress. + properties: + dataPolicy: + description: |- + dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. + It may not exceed 2 items and must not contain duplicates. + Valid values are ObfuscateNetworking and WorkloadNames. + When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. + When omitted no obfuscation is applied. 
+ items: + description: DataPolicyOption declares valid data policy options + enum: + - ObfuscateNetworking + - WorkloadNames + type: string + maxItems: 2 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: dataPolicy items must be unique + rule: self.all(x, self.exists_one(y, x == y)) + gatherers: + description: gatherers is a required field that specifies the + configuration of the gatherers. + properties: + custom: + description: |- + custom provides gathering configuration. + It is required when mode is Custom, and forbidden otherwise. + Custom configuration allows user to disable only a subset of gatherers. + Gatherers that are not explicitly disabled in custom configuration will run. + properties: + configs: + description: |- + configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + It may not exceed 100 items and each gatherer can be present only once. + It is possible to disable an entire set of gatherers while allowing a specific function within that set. + The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + items: + description: GathererConfig allows to configure specific + gatherers + properties: + name: + description: |- + name is the required name of a specific gatherer. + It may not exceed 256 characters. + The format for a gatherer name is: {gatherer}/{function} where the function is optional. + Gatherer consists of a lowercase letters only that may include underscores (_). + Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: gatherer name must be in the format of + {gatherer}/{function} where the gatherer and + function are lowercase letters only that may + include underscores (_) and are separated by + a forward slash (/) if the function is provided + rule: self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$") + state: + description: |- + state is a required field that allows you to configure specific gatherer. Valid values are "Enabled" and "Disabled". + When set to Enabled the gatherer will run. + When set to Disabled the gatherer will not run. + enum: + - Enabled + - Disabled + type: string + required: + - name + - state + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - configs + type: object + mode: + description: |- + mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + When set to All, all gatherers will run and gather data. + When set to None, all gatherers will be disabled and no data will be gathered. + When set to Custom, the custom configuration from the custom field will be applied. 
+ enum: + - All + - None + - Custom + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: custom is required when mode is Custom, and forbidden + otherwise + rule: 'has(self.mode) && self.mode == ''Custom'' ? has(self.custom) + : !has(self.custom)' + storage: + description: |- + storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + If omitted, the gathering job will use ephemeral storage. + properties: + persistentVolume: + description: |- + persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + The PersistentVolume must be created in the openshift-insights namespace. + properties: + claim: + description: |- + claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + The PersistentVolumeClaim must be created in the openshift-insights namespace. + properties: + name: + description: |- + name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. + It is a string that follows the DNS1123 subdomain format. + It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist + of lower case alphanumeric characters, '-' or + '.', and must start and end with an alphanumeric + character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + mountPath: + description: |- + mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default mount path is /var/lib/insights-operator + The path may not exceed 1024 characters and must not contain a colon. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: mountPath must not contain a colon + rule: '!self.contains('':'')' + required: + - claim + type: object + type: + description: |- + type is a required field that specifies the type of storage that will be used to store the Insights data archive. + Valid values are "PersistentVolume" and "Ephemeral". + When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + enum: + - PersistentVolume + - Ephemeral + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: persistentVolume is required when type is PersistentVolume, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''PersistentVolume'' ? 
has(self.persistentVolume) + : !has(self.persistentVolume)' + required: + - gatherers + type: object + required: + - gatherConfig + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..afaf9c37d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2448 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: insightsdatagathers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: InsightsDataGather + listKind: InsightsDataGatherList + plural: insightsdatagathers + singular: insightsdatagather + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + InsightsDataGather provides data gather configuration options for the Insights Operator. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + gatherConfig: + description: gatherConfig is a required spec attribute that includes + all the configuration options related to gathering of the Insights + data and its uploading to the ingress. + properties: + dataPolicy: + description: |- + dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. + It may not exceed 2 items and must not contain duplicates. + Valid values are ObfuscateNetworking and WorkloadNames. + When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. + When omitted no obfuscation is applied. 
+ items: + description: DataPolicyOption declares valid data policy options + enum: + - ObfuscateNetworking + - WorkloadNames + type: string + maxItems: 2 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: dataPolicy items must be unique + rule: self.all(x, self.exists_one(y, x == y)) + gatherers: + description: gatherers is a required field that specifies the + configuration of the gatherers. + properties: + custom: + description: |- + custom provides gathering configuration. + It is required when mode is Custom, and forbidden otherwise. + Custom configuration allows user to disable only a subset of gatherers. + Gatherers that are not explicitly disabled in custom configuration will run. + properties: + configs: + description: |- + configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + It may not exceed 100 items and each gatherer can be present only once. + It is possible to disable an entire set of gatherers while allowing a specific function within that set. + The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + items: + description: GathererConfig allows to configure specific + gatherers + properties: + name: + description: |- + name is the required name of a specific gatherer. + It may not exceed 256 characters. + The format for a gatherer name is: {gatherer}/{function} where the function is optional. + Gatherer consists of a lowercase letters only that may include underscores (_). + Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + maxLength: 256 + minLength: 1 + type: string + x-kubernetes-validations: + - message: gatherer name must be in the format of + {gatherer}/{function} where the gatherer and + function are lowercase letters only that may + include underscores (_) and are separated by + a forward slash (/) if the function is provided + rule: self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$") + state: + description: |- + state is a required field that allows you to configure specific gatherer. Valid values are "Enabled" and "Disabled". + When set to Enabled the gatherer will run. + When set to Disabled the gatherer will not run. + enum: + - Enabled + - Disabled + type: string + required: + - name + - state + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - configs + type: object + mode: + description: |- + mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + When set to All, all gatherers will run and gather data. + When set to None, all gatherers will be disabled and no data will be gathered. + When set to Custom, the custom configuration from the custom field will be applied. 
+ enum: + - All + - None + - Custom + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: custom is required when mode is Custom, and forbidden + otherwise + rule: 'has(self.mode) && self.mode == ''Custom'' ? has(self.custom) + : !has(self.custom)' + storage: + description: |- + storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. + If omitted, the gathering job will use ephemeral storage. + properties: + persistentVolume: + description: |- + persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + The PersistentVolume must be created in the openshift-insights namespace. + properties: + claim: + description: |- + claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + The PersistentVolumeClaim must be created in the openshift-insights namespace. + properties: + name: + description: |- + name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. + It is a string that follows the DNS1123 subdomain format. + It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist + of lower case alphanumeric characters, '-' or + '.', and must start and end with an alphanumeric + character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + mountPath: + description: |- + mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + The current default mount path is /var/lib/insights-operator + The path may not exceed 1024 characters and must not contain a colon. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: mountPath must not contain a colon + rule: '!self.contains('':'')' + required: + - claim + type: object + type: + description: |- + type is a required field that specifies the type of storage that will be used to store the Insights data archive. + Valid values are "PersistentVolume" and "Ephemeral". + When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + enum: + - PersistentVolume + - Ephemeral + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: persistentVolume is required when type is PersistentVolume, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''PersistentVolume'' ? 
has(self.persistentVolume) + : !has(self.persistentVolume)' + required: + - gatherers + type: object + required: + - gatherConfig + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml index 21eee52c7..646978b80 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml @@ -139,8 +139,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -198,6 +199,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -338,7 +376,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 70edc1769..0863934f2 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -616,6 +616,11 @@ func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { *out = make([]AzureResourceTag, len(*in)) copy(*out, *in) } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } return } @@ -1731,6 +1736,27 @@ func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Custom) DeepCopyInto(out *Custom) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]GathererConfig, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Custom. +func (in *Custom) DeepCopy() *Custom { + if in == nil { + return nil + } + out := new(Custom) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { *out = *in @@ -2462,6 +2488,62 @@ func (in *GCPServiceEndpoint) DeepCopy() *GCPServiceEndpoint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DataPolicy != nil { + in, out := &in.DataPolicy, &out.DataPolicy + *out = make([]DataPolicyOption, len(*in)) + copy(*out, *in) + } + in.Gatherers.DeepCopyInto(&out.Gatherers) + out.Storage = in.Storage + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. +func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GathererConfig) DeepCopyInto(out *GathererConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererConfig. +func (in *GathererConfig) DeepCopy() *GathererConfig { + if in == nil { + return nil + } + out := new(GathererConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gatherers) DeepCopyInto(out *Gatherers) { + *out = *in + in.Custom.DeepCopyInto(&out.Custom) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gatherers. +func (in *Gatherers) DeepCopy() *Gatherers { + if in == nil { + return nil + } + out := new(Gatherers) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { *out = *in @@ -3565,6 +3647,83 @@ func (in *IngressStatus) DeepCopy() *IngressStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { *out = *in @@ -5014,6 +5173,39 @@ func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimReference) DeepCopyInto(out *PersistentVolumeClaimReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimReference. 
+func (in *PersistentVolumeClaimReference) DeepCopy() *PersistentVolumeClaimReference { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeConfig) DeepCopyInto(out *PersistentVolumeConfig) { + *out = *in + out.Claim = in.Claim + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfig. +func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { + if in == nil { + return nil + } + out := new(PersistentVolumeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -5976,6 +6168,23 @@ func (in *SignatureStore) DeepCopy() *SignatureStore { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.PersistentVolume = in.PersistentVolume + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StringSource) DeepCopyInto(out *StringSource) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 91881630b..d8d6b502e 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -361,11 +361,12 @@ infrastructures.config.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSClusterHostedDNS + - AWSClusterHostedDNSInstall + - AzureClusterHostedDNSInstall - DualReplica - DyanmicServiceEndpointIBMCloud - - GCPClusterHostedDNS - - GCPCustomAPIEndpoints + - GCPClusterHostedDNSInstall + - GCPCustomAPIEndpointsInstall - HighlyAvailableArbiter - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets @@ -407,6 +408,29 @@ ingresses.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2448 + CRDName: insightsdatagathers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: false + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1 + networks.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index eb78ad7ca..31aab4dfe 100644 --- 
a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -318,7 +318,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nWhen omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is the Intermediate profile.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } @@ -399,7 +399,7 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { var map_ExtraMapping = map[string]string{ "": "ExtraMapping allows specifying a key and CEL expression to evaluate the keys' value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefix path (e.g 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length.", - "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. 
\"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 4096 characters in length. valueExpression must not be empty.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 1024 characters in length. valueExpression must not be empty.", } func (ExtraMapping) SwaggerDoc() map[string]string { @@ -477,7 +477,7 @@ var map_TokenClaimMappings = map[string]string{ "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", - "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 64 extra attribute mappings may be provided.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { @@ -487,7 +487,7 @@ func (TokenClaimMappings) SwaggerDoc() map[string]string { var map_TokenClaimOrExpressionMapping = map[string]string{ "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. 
The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", - "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length and must not exceed 4096 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.", } func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { @@ -504,7 +504,7 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { } var map_TokenIssuer = map[string]string{ - "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nissuerURL must use the 'https' scheme.", + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. 
At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", } @@ -651,7 +651,7 @@ func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { } var map_ClusterOperator = map[string]string{ - "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterOperator holds the status of a core or optional OpenShift component managed by the Cluster Version Operator (CVO). This object is used by operators to convey their state to the rest of the cluster. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec holds configuration that could apply to any operator.", "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", @@ -893,7 +893,7 @@ var map_UpdateHistory = map[string]string{ "version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", "image": "image is a container image location that contains the update. This value is always populated.", "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", - "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", } func (UpdateHistory) SwaggerDoc() map[string]string { @@ -1480,6 +1480,7 @@ var map_AzurePlatformStatus = map[string]string{ "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. 
If empty, the value is equal to `AzurePublicCloud`.", "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", + "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -1612,7 +1613,7 @@ var map_GCPPlatformStatus = map[string]string{ "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", - "serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 9.", + "serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 11.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { @@ -1652,7 +1653,7 @@ func (GCPServiceEndpoint) SwaggerDoc() map[string]string { var map_IBMCloudPlatformSpec = map[string]string{ "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", - "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. 
Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.", } func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { @@ -1666,7 +1667,7 @@ var map_IBMCloudPlatformStatus = map[string]string{ "providerType": "providerType indicates the type of cluster that was created", "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", - "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.", } func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { @@ -2194,6 +2195,104 @@ func (LoadBalancer) SwaggerDoc() map[string]string { return map_LoadBalancer } +var map_Custom = map[string]string{ + "": "Custom provides the custom configuration of gatherers", + "configs": "configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. It may not exceed 100 items and each gatherer can be present only once. It is possible to disable an entire set of gatherers while allowing a specific function within that set. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", +} + +func (Custom) SwaggerDoc() map[string]string { + return map_Custom +} + +var map_GatherConfig = map[string]string{ + "": "GatherConfig provides data gathering configuration options.", + "dataPolicy": "dataPolicy is an optional list of DataPolicyOptions that allows user to enable additional obfuscation of the Insights archive data. It may not exceed 2 items and must not contain duplicates. Valid values are ObfuscateNetworking and WorkloadNames. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. 
When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resources UIDs will be used instead. When omitted no obfuscation is applied.", + "gatherers": "gatherers is a required field that specifies the configuration of the gatherers.", + "storage": "storage is an optional field that allows user to define persistent storage for gathering jobs to store the Insights data archive. If omitted, the gathering job will use ephemeral storage.", +} + +func (GatherConfig) SwaggerDoc() map[string]string { + return map_GatherConfig +} + +var map_GathererConfig = map[string]string{ + "": "GathererConfig allows to configure specific gatherers", + "name": "name is the required name of a specific gatherer. It may not exceed 256 characters. The format for a gatherer name is: {gatherer}/{function} where the function is optional. Gatherer consists of a lowercase letters only that may include underscores (_). Function consists of a lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", + "state": "state is a required field that allows you to configure specific gatherer. Valid values are \"Enabled\" and \"Disabled\". When set to Enabled the gatherer will run. When set to Disabled the gatherer will not run.", +} + +func (GathererConfig) SwaggerDoc() map[string]string { + return map_GathererConfig +} + +var map_Gatherers = map[string]string{ + "": "Gatherers specifies the configuration of the gatherers", + "mode": "mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. When set to All, all gatherers will run and gather data. When set to None, all gatherers will be disabled and no data will be gathered. When set to Custom, the custom configuration from the custom field will be applied.", + "custom": "custom provides gathering configuration. It is required when mode is Custom, and forbidden otherwise. Custom configuration allows user to disable only a subset of gatherers. Gatherers that are not explicitly disabled in custom configuration will run.", +} + +func (Gatherers) SwaggerDoc() map[string]string { + return map_Gatherers +} + +var map_InsightsDataGather = map[string]string{ + "": "InsightsDataGather provides data gather configuration options for the Insights Operator.\n\n\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", +} + +func (InsightsDataGather) SwaggerDoc() map[string]string { + return map_InsightsDataGather +} + +var map_InsightsDataGatherList = map[string]string{ + "": "InsightsDataGatherList is a collection of items Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the required standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the required list of InsightsDataGather objects it may not exceed 100 items", +} + +func (InsightsDataGatherList) SwaggerDoc() map[string]string { + return map_InsightsDataGatherList +} + +var map_InsightsDataGatherSpec = map[string]string{ + "": "InsightsDataGatherSpec contains the configuration for the data gathering.", + "gatherConfig": "gatherConfig is a required spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.", +} + +func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { + return map_InsightsDataGatherSpec +} + +var map_PersistentVolumeClaimReference = map[string]string{ + "": "PersistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "name": "name is the name of the PersistentVolumeClaim that will be used to store the Insights data archive. It is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", +} + +func (PersistentVolumeClaimReference) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimReference +} + +var map_PersistentVolumeConfig = map[string]string{ + "": "PersistentVolumeConfig provides configuration options for PersistentVolume storage.", + "claim": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "mountPath": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator The path may not exceed 1024 characters and must not contain a colon.", +} + +func (PersistentVolumeConfig) SwaggerDoc() map[string]string { + return map_PersistentVolumeConfig +} + +var map_Storage = map[string]string{ + "": "Storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.", + "persistentVolume": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + var map_AWSKMSConfig = map[string]string{ "": "AWSKMSConfig defines the KMS config specific to AWS KMS provider", "keyARN": "keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. 
The value must adhere to the format `arn:aws:kms:::key/`, where: - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - `` is a 12-digit numeric identifier for the AWS account. - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.", diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index c048c64ef..f6d4cd342 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -81,14 +81,19 @@ type ClusterMonitoringSpec struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. // The current default value is `Disabled`. // +optional - UserDefined *UserDefinedMonitoring `json:"userDefined,omitempty"` + UserDefined UserDefinedMonitoring `json:"userDefined,omitempty,omitzero"` // alertmanagerConfig allows users to configure how the default Alertmanager instance // should be deployed in the `openshift-monitoring` namespace. // alertmanagerConfig is optional. // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. // The current default value is `DefaultConfig`. // +optional - AlertmanagerConfig *AlertmanagerConfig `json:"alertmanagerConfig,omitempty"` + AlertmanagerConfig AlertmanagerConfig `json:"alertmanagerConfig,omitempty,omitzero"` + // metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. + // Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // +optional + MetricsServerConfig MetricsServerConfig `json:"metricsServerConfig,omitempty,omitzero"` } // UserDefinedMonitoring config for user-defined projects. @@ -128,12 +133,12 @@ type AlertmanagerConfig struct { // // +unionDiscriminator // +required - DeploymentMode AlertManagerDeployMode `json:"deploymentMode"` + DeploymentMode AlertManagerDeployMode `json:"deploymentMode,omitempty"` // customConfig must be set when deploymentMode is CustomConfig, and must be unset otherwise. // When set to CustomConfig, the Alertmanager will be deployed with custom configuration. // +optional - CustomConfig *AlertmanagerCustomConfig `json:"customConfig,omitempty"` + CustomConfig AlertmanagerCustomConfig `json:"customConfig,omitempty,omitzero"` } // AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. @@ -153,7 +158,7 @@ type AlertmanagerCustomConfig struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. // The current default value is `Info`. // +optional - LogLevel LogLevel `json:"logLevel"` + LogLevel LogLevel `json:"logLevel,omitempty"` // nodeSelector defines the nodes on which the Pods are scheduled // nodeSelector is optional. // @@ -291,9 +296,10 @@ type ContainerResource struct { // This field is required. // name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character. 
// +required + // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:XValidation:rule="!format.qualifiedName().validate(self).hasValue()",message="name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character" - Name string `json:"name"` + Name string `json:"name,omitempty"` // request is the minimum amount of the resource required (e.g. "2Mi", "1Gi"). // This field is optional. @@ -322,3 +328,135 @@ type ContainerResource struct { // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." // +kubebuilder:validation:MaxLength=63 type SecretName string + +// MetricsServerConfig provides configuration options for the Metrics Server instance +// that runs in the `openshift-monitoring` namespace. Use this configuration to control +// how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled. +// +kubebuilder:validation:MinProperties=1 +type MetricsServerConfig struct { + // audit defines the audit configuration used by the Metrics Server instance. + // audit is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + //The current default sets audit.profile to Metadata + // +optional + Audit Audit `json:"audit,omitempty,omitzero"` + // nodeSelector defines the nodes on which the Pods are scheduled + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10 + // Minimum length for this list is 1 + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // verbosity defines the verbosity of log messages for Metrics Server. + // Valid values are Errors, Info, Trace, TraceAll and omitted. + // When set to Errors, only critical messages and errors are logged. + // When set to Info, only basic information messages are logged. + // When set to Trace, information useful for general debugging is logged. + // When set to TraceAll, detailed information about metric scraping is logged. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. + // The current default value is `Errors` + // +optional + Verbosity VerbosityLevel `json:"verbosity,omitempty,omitzero"` + // resources defines the compute resource requests and limits for the Metrics Server container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. 
+ // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 4m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. + // Minimum length for this list is 1 + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// AuditProfile defines the audit log level for the Metrics Server. +// +kubebuilder:validation:Enum=None;Metadata;Request;RequestResponse +type AuditProfile string + +const ( + // AuditProfileNone disables audit logging + AuditProfileNone AuditProfile = "None" + // AuditProfileMetadata logs request metadata (requesting user, timestamp, resource, verb, etc.) but not request or response body + AuditProfileMetadata AuditProfile = "Metadata" + // AuditProfileRequest logs event metadata and request body but not response body + AuditProfileRequest AuditProfile = "Request" + // AuditProfileRequestResponse logs event metadata, request and response bodies + AuditProfileRequestResponse AuditProfile = "RequestResponse" +) + +// VerbosityLevel defines the verbosity of log messages for Metrics Server. +// +kubebuilder:validation:Enum=Errors;Info;Trace;TraceAll +type VerbosityLevel string + +const ( + // VerbosityLevelErrors means only critical messages and errors are logged. + VerbosityLevelErrors VerbosityLevel = "Errors" + // VerbosityLevelInfo means basic informational messages are logged. + VerbosityLevelInfo VerbosityLevel = "Info" + // VerbosityLevelTrace means extended information useful for general debugging is logged. + VerbosityLevelTrace VerbosityLevel = "Trace" + // VerbosityLevelTraceAll means detailed information about metric scraping operations is logged. + VerbosityLevelTraceAll VerbosityLevel = "TraceAll" +) + +// Audit profile configurations +type Audit struct { + // profile is a required field for configuring the audit log level of the Kubernetes Metrics Server. + // Allowed values are None, Metadata, Request, or RequestResponse. + // When set to None, audit logging is disabled and no audit events are recorded. 
+ // When set to Metadata, only request metadata (such as requesting user, timestamp, resource, verb, etc.) is logged, but not the request or response body. + // When set to Request, event metadata and the request body are logged, but not the response body. + // When set to RequestResponse, event metadata, request body, and response body are all logged, providing the most detailed audit information. + // + // See: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy + // for more information about auditing and log levels. + // +required + Profile AuditProfile `json:"profile,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index 144b173f6..6549f6cbe 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -14,11 +14,7 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AlertmanagerConfig) DeepCopyInto(out *AlertmanagerConfig) { *out = *in - if in.CustomConfig != nil { - in, out := &in.CustomConfig, &out.CustomConfig - *out = new(AlertmanagerCustomConfig) - (*in).DeepCopyInto(*out) - } + in.CustomConfig.DeepCopyInto(&out.CustomConfig) return } @@ -86,6 +82,22 @@ func (in *AlertmanagerCustomConfig) DeepCopy() *AlertmanagerCustomConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backup) DeepCopyInto(out *Backup) { *out = *in @@ -350,16 +362,9 @@ func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { *out = *in - if in.UserDefined != nil { - in, out := &in.UserDefined, &out.UserDefined - *out = new(UserDefinedMonitoring) - **out = **in - } - if in.AlertmanagerConfig != nil { - in, out := &in.AlertmanagerConfig, &out.AlertmanagerConfig - *out = new(AlertmanagerConfig) - (*in).DeepCopyInto(*out) - } + out.UserDefined = in.UserDefined + in.AlertmanagerConfig.DeepCopyInto(&out.AlertmanagerConfig) + in.MetricsServerConfig.DeepCopyInto(&out.MetricsServerConfig) return } @@ -677,6 +682,51 @@ func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { + *out = *in + out.Audit = in.Audit + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. +func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { + if in == nil { + return nil + } + out := new(MetricsServerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PKI) DeepCopyInto(out *PKI) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index b6ff150fc..6ba6ad11f 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -143,6 +143,15 @@ func (AlertmanagerCustomConfig) SwaggerDoc() map[string]string { return map_AlertmanagerCustomConfig } +var map_Audit = map[string]string{ + "": "Audit profile configurations", + "profile": "profile is a required field for configuring the audit log level of the Kubernetes Metrics Server. Allowed values are None, Metadata, Request, or RequestResponse. When set to None, audit logging is disabled and no audit events are recorded. When set to Metadata, only request metadata (such as requesting user, timestamp, resource, verb, etc.) is logged, but not the request or response body. When set to Request, event metadata and the request body are logged, but not the response body. When set to RequestResponse, event metadata, request body, and response body are all logged, providing the most detailed audit information.\n\nSee: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy for more information about auditing and log levels.", +} + +func (Audit) SwaggerDoc() map[string]string { + return map_Audit +} + var map_ClusterMonitoring = map[string]string{ "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", "metadata": "metadata is the standard object metadata.", @@ -165,9 +174,10 @@ func (ClusterMonitoringList) SwaggerDoc() map[string]string { } var map_ClusterMonitoringSpec = map[string]string{ - "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", - "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", - "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", + "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", + "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -193,6 +203,20 @@ func (ContainerResource) SwaggerDoc() map[string]string { return map_ContainerResource } +var map_MetricsServerConfig = map[string]string{ + "": "MetricsServerConfig provides configuration options for the Metrics Server instance that runs in the `openshift-monitoring` namespace. Use this configuration to control how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled.", + "audit": "audit defines the audit configuration used by the Metrics Server instance. audit is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default sets audit.profile to Metadata", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", + "tolerations": "tolerations defines tolerations for the pods. 
tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", + "verbosity": "verbosity defines the verbosity of log messages for Metrics Server. Valid values are Errors, Info, Trace, TraceAll and omitted. When set to Errors, only critical messages and errors are logged. When set to Info, only basic information messages are logged. When set to Trace, information useful for general debugging is logged. When set to TraceAll, detailed information about metric scraping is logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Errors`", + "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (MetricsServerConfig) SwaggerDoc() map[string]string { + return map_MetricsServerConfig +} + var map_UserDefinedMonitoring = map[string]string{ "": "UserDefinedMonitoring config for user-defined projects.", "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. 
The current default value is `Disabled`.", diff --git a/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go index 90c32815c..cd61e14a8 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go @@ -18,7 +18,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:metadata:annotations="description=Extension for configuring openshift web console command line interface (CLI) downloads." // +kubebuilder:metadata:annotations="displayName=ConsoleCLIDownload" // +kubebuilder:printcolumn:name=Display name,JSONPath=.spec.displayName,type=string -// +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,type=string +// +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,type=date // +openshift:compatibility-gen:level=2 type ConsoleCLIDownload struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go index 977fcbda9..a84572925 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_link.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go @@ -19,7 +19,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +kubebuilder:metadata:annotations="displayName=ConsoleLinks" // +kubebuilder:printcolumn:name=Text,JSONPath=.spec.text,type=string // +kubebuilder:printcolumn:name=URL,JSONPath=.spec.href,type=string -// +kubebuilder:printcolumn:name=Menu,JSONPath=.spec.menu,type=string +// +kubebuilder:printcolumn:name=Location,JSONPath=.spec.location,type=string // +kubebuilder:printcolumn:name=Age,JSONPath=.metadata.creationTimestamp,type=date // +openshift:compatibility-gen:level=2 type ConsoleLink struct { diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml index 250f873a0..caa676e69 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml @@ -22,7 +22,7 @@ consoleclidownloads.console.openshift.io: type: string - jsonPath: .metadata.creationTimestamp name: Age - type: string + type: date Scope: Cluster ShortNames: null TopLevelFeatureGates: [] @@ -85,8 +85,8 @@ consolelinks.console.openshift.io: - jsonPath: .spec.href name: URL type: string - - jsonPath: .spec.menu - name: Menu + - jsonPath: .spec.location + name: Location type: string - jsonPath: .metadata.creationTimestamp name: Age diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 927568c85..54c9fd3de 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -5,81 +5,85 @@ | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | MultiArchInstallAzure| | | | | | | | ShortCertRotation| | | | | | | -| SigstoreImageVerification| | | | | | | -| NoRegistryClusterOperations| | | | Enabled | | | | BootImageSkewEnforcement| | | Enabled | Enabled | | | -| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | +| ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | | Example2| | | Enabled | Enabled | | | | ExternalSnapshotMetadata| | | 
Enabled | Enabled | | | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | | NewOLMOwnSingleNamespace| | | | Enabled | | Enabled | | NewOLMPreflightPermissionChecks| | | | Enabled | | Enabled | | NewOLMWebhookProviderOpenshiftServiceCA| | | | Enabled | | Enabled | -| SELinuxMount| | | Enabled | Enabled | | | +| NoRegistryClusterOperations| | | | Enabled | | Enabled | | VSphereMixedNodeEnv| | | Enabled | Enabled | | | | NewOLM| | Enabled | | Enabled | | Enabled | | AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| AWSClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | | AWSDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AWSDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | | AWSServiceLBNetworkSecurityGroup| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | +| AzureClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | | AzureDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AzureDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | | AzureMultiDisk| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | +| CBORServingAndStorage| | | Enabled | Enabled | Enabled | Enabled | +| ClientsAllowCBOR| | | Enabled | Enabled | Enabled | Enabled | +| ClientsPreferCBOR| | | Enabled | Enabled | Enabled | Enabled | | ClusterAPIInstallIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | +| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | | DualReplica| | | Enabled | Enabled | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | +| EventTTL| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | -| ExternalOIDCWithUIDAndExtraClaimMappings| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | GCPCustomAPIEndpoints| | | Enabled | Enabled | Enabled | Enabled | -| HighlyAvailableArbiter| | | Enabled | Enabled | Enabled | Enabled | +| GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | Enabled | Enabled | +| GCPDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | | ImageModeStatusReporting| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | -| ImageVolume| | | Enabled | Enabled | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | -| InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | -| InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | +| IrreconcilableMachineConfig| | | Enabled | Enabled | Enabled | Enabled | | KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | | MachineAPIMigration| | | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAzure| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesCPMS| | | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesvSphere| | | Enabled | Enabled | Enabled | Enabled | | 
MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | | MultiDiskSetup| | | Enabled | Enabled | Enabled | Enabled | | MutatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled | -| NodeSwap| | | Enabled | Enabled | Enabled | Enabled | | NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | Enabled | Enabled | | PreconfiguredUDNAddresses| | | Enabled | Enabled | Enabled | Enabled | +| SELinuxMount| | | Enabled | Enabled | Enabled | Enabled | | SignatureStores| | | Enabled | Enabled | Enabled | Enabled | | SigstoreImageVerificationPKI| | | Enabled | Enabled | Enabled | Enabled | -| StoragePerformantSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled | -| UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled | | VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | Enabled | Enabled | | VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiDisk| | | Enabled | Enabled | Enabled | Enabled | -| VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | -| ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | | AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ChunkSizeMiB| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDC| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDCWithUIDAndExtraClaimMappings| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GCPClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | GatewayAPI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | GatewayAPIController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| IngressControllerLBSubnetsAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| HighlyAvailableArbiter| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ImageVolume| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -88,14 +92,17 @@ | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| OnClusterBuild| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | PinnedImages| Enabled | Enabled | Enabled 
| Enabled | Enabled | Enabled | | ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| StoragePerformantSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| UpgradeStatus| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VolumeAttributesClass| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index e4f531946..4fea20540 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -215,7 +215,6 @@ type ImageRegistryConfigStorageS3 struct { // The minimum value is 5 and the maximum value is 5120 (5 GiB). // +kubebuilder:validation:Minimum=5 // +kubebuilder:validation:Maximum=5120 - // +openshift:enable:FeatureGate=ChunkSizeMiB // +optional ChunkSizeMiB int32 `json:"chunkSizeMiB,omitempty"` // encrypt specifies whether the registry stores the image in encrypted diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml index a6ccc2262..95613c7ae 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml @@ -4,8 +4,7 @@ configs.imageregistry.operator.openshift.io: CRDName: configs.imageregistry.operator.openshift.io Capability: "" Category: "" - FeatureGates: - - ChunkSizeMiB + FeatureGates: [] FilenameOperatorName: "" FilenameOperatorOrdering: "00" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go index c0e03c233..f2db8e9cc 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/types.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -801,7 +801,7 @@ type TokenConfig struct { // accessTokenInactivityTimeoutSeconds defined the default token // inactivity timeout for tokens granted by any client. // Setting it to nil means the feature is completely disabled (default) - // The default setting can be overriden on OAuthClient basis. + // The default setting can be overridden on OAuthClient basis. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not // used within this temporal window. 
The user will need to acquire a new diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go index 42444e8ae..a915c0042 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -927,7 +927,7 @@ var map_TokenConfig = map[string]string{ "": "TokenConfig holds the necessary configuration options for authorization and access tokens", "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", - "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overridden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", } func (TokenConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index 409ffc64e..d7661cf38 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -174,7 +174,7 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // The ProviderSpec within contains platform specific details // for creating the Control Plane Machines. // The ProviderSe should be complete apart from the platform specific - // failure domain field. This will be overriden when the Machines + // failure domain field. This will be overridden when the Machines // are created based on the FailureDomains field. 
// +required Spec machinev1beta1.MachineSpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index c0b8c4ce4..2e35df7e2 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -280,7 +280,7 @@ var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.", "failureDomains": "failureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", "metadata": "ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.", - "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", + "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overridden when the Machines are created based on the FailureDomains field.", } func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index db15df2cc..b3b38bc6c 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -17,6 +17,13 @@ type AWSMachineProviderConfig struct { AMI AWSResourceReference `json:"ami"` // instanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType"` + // cpuOptions defines CPU-related settings for the instance, including the confidential computing policy. + // When omitted, this means no opinion and the AWS platform is left to choose a reasonable default. + // More info: + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CpuOptionsRequest.html, + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cpu-options-supported-instances-values.html + // +optional + CPUOptions *CPUOptions `json:"cpuOptions,omitempty,omitzero"` // tags is the set of tags to add to apply to an instance, in addition to the ones // added by default by the actuator. These tags are additive. 
The actuator will ensure // these tags are present, but will not remove any other tags that may exist on the @@ -109,6 +116,37 @@ type AWSMachineProviderConfig struct { MarketType MarketType `json:"marketType,omitempty"` } +// AWSConfidentialComputePolicy represents the confidential compute configuration for the instance. +// +kubebuilder:validation:Enum=Disabled;AMDEncryptedVirtualizationNestedPaging +type AWSConfidentialComputePolicy string + +const ( + // AWSConfidentialComputePolicyDisabled disables confidential computing for the instance. + AWSConfidentialComputePolicyDisabled AWSConfidentialComputePolicy = "Disabled" + // AWSConfidentialComputePolicySEVSNP enables AMD SEV-SNP as the confidential computing technology for the instance. + AWSConfidentialComputePolicySEVSNP AWSConfidentialComputePolicy = "AMDEncryptedVirtualizationNestedPaging" +) + +// CPUOptions defines CPU-related settings for the instance, including the confidential computing policy. +// If provided, it must not be empty — at least one field must be set. +// +kubebuilder:validation:MinProperties=1 +type CPUOptions struct { + // confidentialCompute specifies whether confidential computing should be enabled for the instance, + // and, if so, which confidential computing technology to use. + // Valid values are: Disabled, AMDEncryptedVirtualizationNestedPaging and omitted. + // When set to Disabled, confidential computing will be disabled for the instance. + // When set to AMDEncryptedVirtualizationNestedPaging, AMD SEV-SNP will be used as the confidential computing technology for the instance. + // In this case, ensure the following conditions are met: + // 1) The selected instance type supports AMD SEV-SNP. + // 2) The selected AWS region supports AMD SEV-SNP. + // 3) The selected AMI supports AMD SEV-SNP. + // More details can be checked at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html + // When omitted, this means no opinion and the AWS platform is left to choose a reasonable default, + // which is subject to change without notice. The current default is Disabled. + // +optional + ConfidentialCompute *AWSConfidentialComputePolicy `json:"confidentialCompute,omitempty"` +} + // BlockDeviceMappingSpec describes a block device mapping type BlockDeviceMappingSpec struct { // The device name exposed to the machine (for example, /dev/sdh or xvdh). diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 00dbebc9e..f80d716a0 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -76,6 +76,7 @@ type MachineHealthCheckSpec struct { // Expects either a postive integer value or a percentage value. // Percentage values must be positive whole numbers and are capped at 100%. // Both 0 and 0% are valid and will block all remediation. + // Defaults to 100% if not set. 
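As an illustration of the cpuOptions field added to AWSMachineProviderConfig above, the following sketch shows how a provider config could opt into AMD SEV-SNP. It is not part of the vendored sources; the import alias and the instance type are assumptions (the instance type must be one that actually supports SEV-SNP in the chosen region and AMI).

// Minimal sketch: requesting AMD SEV-SNP confidential computing via the new
// CPUOptions field on AWSMachineProviderConfig.
package example

import (
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func awsProviderConfigWithSEVSNP() *machinev1beta1.AWSMachineProviderConfig {
	policy := machinev1beta1.AWSConfidentialComputePolicySEVSNP
	return &machinev1beta1.AWSMachineProviderConfig{
		// Assumed instance type; it must support AMD SEV-SNP (see the AWS docs linked above).
		InstanceType: "m6a.xlarge",
		CPUOptions: &machinev1beta1.CPUOptions{
			// Setting Disabled instead would explicitly turn confidential computing off;
			// omitting CPUOptions entirely leaves the default to the platform.
			ConfidentialCompute: &policy,
		},
	}
}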
// +kubebuilder:default:="100%" // +kubebuilder:validation:XIntOrString // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index 7763435a9..5aa4f90a4 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -18,6 +18,11 @@ func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.AMI.DeepCopyInto(&out.AMI) + if in.CPUOptions != nil { + in, out := &in.CPUOptions, &out.CPUOptions + *out = new(CPUOptions) + (*in).DeepCopyInto(*out) + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]TagSpecification, len(*in)) @@ -411,6 +416,27 @@ func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPUOptions) DeepCopyInto(out *CPUOptions) { + *out = *in + if in.ConfidentialCompute != nil { + in, out := &in.ConfidentialCompute, &out.ConfidentialCompute + *out = new(AWSConfidentialComputePolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUOptions. +func (in *CPUOptions) DeepCopy() *CPUOptions { + if in == nil { + return nil + } + out := new(CPUOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Condition) DeepCopyInto(out *Condition) { *out = *in diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 2667a0aa2..4a1b969a8 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -15,6 +15,7 @@ var map_AWSMachineProviderConfig = map[string]string{ "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "ami": "ami is the reference to the AMI from which to create the machine instance.", "instanceType": "instanceType is the type of instance to create. Example: m4.xlarge", + "cpuOptions": "cpuOptions defines CPU-related settings for the instance, including the confidential computing policy. When omitted, this means no opinion and the AWS platform is left to choose a reasonable default. More info: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CpuOptionsRequest.html, https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cpu-options-supported-instances-values.html", "tags": "tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. 
The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", "iamInstanceProfile": "iamInstanceProfile is a reference to an IAM role to assign to the instance", "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", @@ -82,6 +83,15 @@ func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { return map_BlockDeviceMappingSpec } +var map_CPUOptions = map[string]string{ + "": "CPUOptions defines CPU-related settings for the instance, including the confidential computing policy. If provided, it must not be empty — at least one field must be set.", + "confidentialCompute": "confidentialCompute specifies whether confidential computing should be enabled for the instance, and, if so, which confidential computing technology to use. Valid values are: Disabled, AMDEncryptedVirtualizationNestedPaging and omitted. When set to Disabled, confidential computing will be disabled for the instance. When set to AMDEncryptedVirtualizationNestedPaging, AMD SEV-SNP will be used as the confidential computing technology for the instance. In this case, ensure the following conditions are met: 1) The selected instance type supports AMD SEV-SNP. 2) The selected AWS region supports AMD SEV-SNP. 3) The selected AMI supports AMD SEV-SNP. More details can be checked at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html When omitted, this means no opinion and the AWS platform is left to choose a reasonable default, which is subject to change without notice. The current default is Disabled.", +} + +func (CPUOptions) SwaggerDoc() map[string]string { + return map_CPUOptions +} + var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", @@ -625,7 +635,7 @@ var map_MachineHealthCheckSpec = map[string]string{ "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", "unhealthyConditions": "unhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", - "maxUnhealthy": "Any farther remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", + "maxUnhealthy": "Any farther remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. Defaults to 100% if not set.", "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". 
Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", "remediationTemplate": "remediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", } diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 35b50a8fb..2dac08f09 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -460,7 +460,7 @@ var ( type CIDR string // LoadBalancerStrategy holds parameters for a load balancer. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=SetEIPForNLBIngressController,rule="!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.aws) || !has(self.providerParameters.aws.networkLoadBalancer) || !has(self.providerParameters.aws.networkLoadBalancer.eipAllocations)",message="eipAllocations are forbidden when the scope is Internal." +// +kubebuilder:validation:XValidation:rule="!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.aws) || !has(self.providerParameters.aws.networkLoadBalancer) || !has(self.providerParameters.aws.networkLoadBalancer.eipAllocations)",message="eipAllocations are forbidden when the scope is Internal." // +kubebuilder:validation:XValidation:rule=`!has(self.scope) || self.scope != 'Internal' || !has(self.providerParameters) || !has(self.providerParameters.openstack) || !has(self.providerParameters.openstack.floatingIP) || self.providerParameters.openstack.floatingIP == ""`,message="cannot specify a floating ip when scope is internal" type LoadBalancerStrategy struct { // scope indicates the scope at which the load balancer is exposed. @@ -797,15 +797,14 @@ type AWSClassicLoadBalancerParameters struct { // in the status of the IngressController object. // // +optional - // +openshift:enable:FeatureGate=IngressControllerLBSubnetsAWS Subnets *AWSSubnets `json:"subnets,omitempty"` } // AWSNetworkLoadBalancerParameters holds configuration parameters for an // AWS Network load balancer. For Example: Setting AWS EIPs https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html -// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.ids) && has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids + self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" -// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.ids) && !has(self.subnets.names) && has(self.eipAllocations) ? 
size(self.subnets.ids) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" -// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=SetEIPForNLBIngressController;IngressControllerLBSubnetsAWS,rule=`has(self.subnets) && has(self.subnets.names) && !has(self.subnets.ids) && has(self.eipAllocations) ? size(self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +kubebuilder:validation:XValidation:rule=`has(self.subnets) && has(self.subnets.ids) && has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids + self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +kubebuilder:validation:XValidation:rule=`has(self.subnets) && has(self.subnets.ids) && !has(self.subnets.names) && has(self.eipAllocations) ? size(self.subnets.ids) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" +// +kubebuilder:validation:XValidation:rule=`has(self.subnets) && has(self.subnets.names) && !has(self.subnets.ids) && has(self.eipAllocations) ? size(self.subnets.names) == size(self.eipAllocations) : true`,message="number of subnets must be equal to number of eipAllocations" type AWSNetworkLoadBalancerParameters struct { // subnets specifies the subnets to which the load balancer will // attach. The subnets may be specified by either their @@ -821,7 +820,6 @@ type AWSNetworkLoadBalancerParameters struct { // in the status of the IngressController object. // // +optional - // +openshift:enable:FeatureGate=IngressControllerLBSubnetsAWS Subnets *AWSSubnets `json:"subnets,omitempty"` // eipAllocations is a list of IDs for Elastic IP (EIP) addresses that @@ -837,7 +835,6 @@ type AWSNetworkLoadBalancerParameters struct { // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html for general // information about configuration, characteristics, and limitations of Elastic IP addresses. // - // +openshift:enable:FeatureGate=SetEIPForNLBIngressController // +optional // +listType=atomic // +kubebuilder:validation:XValidation:rule=`self.all(x, self.exists_one(y, x == y))`,message="eipAllocations cannot contain duplicates" diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index 7d468755a..1461f11a1 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -35,6 +35,21 @@ type KubeAPIServer struct { type KubeAPIServerSpec struct { StaticPodOperatorSpec `json:",inline"` + + // eventTTLMinutes specifies the amount of time that the events are stored before being deleted. + // The TTL is allowed between 5 minutes minimum up to a maximum of 180 minutes (3 hours). + // + // Lowering this value will reduce the storage required in etcd. Note that this setting will only apply + // to new events being created and will not update existing events. + // + // When omitted this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default value is 3h (180 minutes). 
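To make the new eventTTLMinutes knob concrete, here is a small sketch of setting it on the KubeAPIServer operator object. The singleton object name "cluster" and the import aliases are assumptions following the usual OpenShift conventions; the field itself is the one added in this hunk and is gated by the EventTTL feature gate.

// Sketch only: lowering the event TTL on the kube-apiserver operator config.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func kubeAPIServerWithShortEventTTL() *operatorv1.KubeAPIServer {
	return &operatorv1.KubeAPIServer{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: operatorv1.KubeAPIServerSpec{
			// Keep events for 90 minutes instead of the default 180 minutes,
			// reducing the storage the events consume in etcd.
			EventTTLMinutes: 90,
		},
	}
}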
+ // + // +openshift:enable:FeatureGate=EventTTL + // +kubebuilder:validation:Minimum=5 + // +kubebuilder:validation:Maximum=180 + // +optional + EventTTLMinutes int32 `json:"eventTTLMinutes,omitempty"` } type KubeAPIServerStatus struct { @@ -46,6 +61,7 @@ type KubeAPIServerStatus struct { // The default expiration for the items is set by the platform and it defaults to 24h. // see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection // +optional + // +listType=atomic ServiceAccountIssuers []ServiceAccountIssuerStatus `json:"serviceAccountIssuers,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 2d88bcd77..c6bcd22bc 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -17,6 +17,9 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? self.?spec.managedBootImages.hasValue() || self.?status.managedBootImagesStatus.hasValue() : true",message="when skew enforcement is in Automatic mode, a boot image configuration is required" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || self.spec.managedBootImages.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io') : true",message="when skew enforcement is in Automatic mode, managedBootImages must contain a MachineManager opting in all MachineAPI MachineSets" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=BootImageSkewEnforcement,rule="self.?status.bootImageSkewEnforcementStatus.mode.orValue(\"\") == 'Automatic' ? !(self.?status.managedBootImagesStatus.machineManagers.hasValue()) || self.status.managedBootImagesStatus.machineManagers.exists(m, m.selection.mode == 'All' && m.resource == 'machinesets' && m.apiGroup == 'machine.openshift.io'): true",message="when skew enforcement is in Automatic mode, managedBootImagesStatus must contain a MachineManager opting in all MachineAPI MachineSets" type MachineConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -36,8 +39,6 @@ type MachineConfiguration struct { type MachineConfigurationSpec struct { StaticPodOperatorSpec `json:",inline"` - // TODO(jkyros): This is where we put our knobs and dials - // managedBootImages allows configuration for the management of boot images for machine // resources within the cluster. This configuration allows users to select resources that should // be updated to the latest boot images during cluster upgrades, ensuring that new machines @@ -55,8 +56,196 @@ type MachineConfigurationSpec struct { // has no effect on cluster upgrades which will still incur node disruption where required. 
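The new BootImageSkewEnforcement CEL rules above require, in Automatic mode, a managedBootImages entry that opts in all MachineAPI MachineSets. A hedged sketch of such a spec fragment follows; the field names ManagedBootImages, MachineManagers, APIGroup and Selection are taken from the existing operator/v1 API and are assumed here, since only the validation markers are visible in this hunk, and the "All" mode is written as a literal because the exported constant name is not shown.

// Sketch only: a MachineConfiguration spec fragment that satisfies the
// Automatic-mode skew enforcement rules above.
package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
)

func managedBootImagesAllMachineSets() operatorv1.MachineConfigurationSpec {
	return operatorv1.MachineConfigurationSpec{
		ManagedBootImages: operatorv1.ManagedBootImages{
			MachineManagers: []operatorv1.MachineManager{
				{
					Resource: operatorv1.MachineSets, // "machinesets"
					APIGroup: operatorv1.MachineAPI,  // "machine.openshift.io"
					Selection: operatorv1.MachineManagerSelector{
						// "All" as a typed literal; the constant name is assumed,
						// not shown in this hunk.
						Mode: operatorv1.MachineManagerSelectorMode("All"),
					},
				},
			},
		},
	}
}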
// +optional NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"` + + // irreconcilableValidationOverrides is an optional field that can used to make changes to a MachineConfig that + // cannot be applied to existing nodes. + // When specified, the fields configured with validation overrides will no longer reject changes to those + // respective fields due to them not being able to be applied to existing nodes. + // Only newly provisioned nodes will have these configurations applied. + // Existing nodes will report observed configuration differences in their MachineConfigNode status. + // +openshift:enable:FeatureGate=IrreconcilableMachineConfig + // +optional + IrreconcilableValidationOverrides IrreconcilableValidationOverrides `json:"irreconcilableValidationOverrides,omitempty,omitzero"` + + // bootImageSkewEnforcement allows an admin to configure how boot image version skew is + // enforced on the cluster. + // When omitted, this will default to Automatic for clusters that support automatic boot image updates. + // For clusters that do not support automatic boot image updates, cluster upgrades will be disabled until + // a skew enforcement mode has been specified. + // When version skew is being enforced, cluster upgrades will be disabled until the version skew is deemed + // acceptable for the current release payload. + // +openshift:enable:FeatureGate=BootImageSkewEnforcement + // +optional + BootImageSkewEnforcement BootImageSkewEnforcementConfig `json:"bootImageSkewEnforcement,omitempty,omitzero"` +} + +// BootImageSkewEnforcementConfig is used to configure how boot image version skew is enforced on the cluster. +// +kubebuilder:validation:XValidation:rule="has(self.mode) && (self.mode =='Manual') ? has(self.manual) : !has(self.manual)",message="manual is required when mode is Manual, and forbidden otherwise" +// +union +type BootImageSkewEnforcementConfig struct { + // mode determines the underlying behavior of skew enforcement mechanism. + // Valid values are Manual and None. + // Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + // & RHCOS version associated with the last boot image update in the manual field. + // In Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + // skew limit described by the release image. + // None means that the MCO will no longer monitor the boot image skew. This may affect + // the cluster's ability to scale. + // This field is required. + // +unionDiscriminator + // +required + Mode BootImageSkewEnforcementConfigMode `json:"mode,omitempty"` + + // manual describes the current boot image of the cluster. + // This should be set to the oldest boot image used amongst all machine resources in the cluster. + // This must include either the RHCOS version of the boot image or the OCP release version which shipped with that + // RHCOS boot image. + // Required when mode is set to "Manual" and forbidden otherwise. + // +optional + Manual ClusterBootImageManual `json:"manual,omitempty,omitzero"` +} + +// ClusterBootImageManual is used to describe the cluster boot image in Manual mode. +// +kubebuilder:validation:XValidation:rule="has(self.mode) && (self.mode =='OCPVersion') ? has(self.ocpVersion) : !has(self.ocpVersion)",message="ocpVersion is required when mode is OCPVersion, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.mode) && (self.mode =='RHCOSVersion') ? 
has(self.rhcosVersion) : !has(self.rhcosVersion)",message="rhcosVersion is required when mode is RHCOSVersion, and forbidden otherwise" +// +union +type ClusterBootImageManual struct { + // mode is used to configure which boot image field is defined in Manual mode. + // Valid values are OCPVersion and RHCOSVersion. + // OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update + // in the OCPVersion field. + // RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update + // in the RHCOSVersion field. + // This field is required. + // +unionDiscriminator + // +required + Mode ClusterBootImageManualMode `json:"mode,omitempty"` + + // ocpVersion provides a string which represents the OCP version of the boot image. + // This field must match the OCP semver compatible format of x.y.z. This field must be between + // 5 and 10 characters long. + // Required when mode is set to "OCPVersion" and forbidden otherwise. + // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]+\\\\.[0-9]+\\\\.[0-9]+$')",message="ocpVersion must match the OCP semver compatible format of x.y.z" + // +kubebuilder:validation:MaxLength:=10 + // +kubebuilder:validation:MinLength:=5 + // +optional + OCPVersion string `json:"ocpVersion,omitempty"` + + // rhcosVersion provides a string which represents the RHCOS version of the boot image + // This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + // format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + // 14 and 21 characters long. + // Required when mode is set to "RHCOSVersion" and forbidden otherwise. + // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]+\\\\.[0-9]+\\\\.([0-9]{8}|[0-9]{12})-[0-9]+$')",message="rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]" + // +kubebuilder:validation:MaxLength:=21 + // +kubebuilder:validation:MinLength:=14 + // +optional + RHCOSVersion string `json:"rhcosVersion,omitempty"` +} + +// ClusterBootImageManualMode is a string enum used to define the cluster's boot image in manual mode. +// +kubebuilder:validation:Enum:="OCPVersion";"RHCOSVersion" +type ClusterBootImageManualMode string + +const ( + // OCPVersion represents a configuration mode used to define the OCPVersion. + ClusterBootImageSpecModeOCPVersion ClusterBootImageManualMode = "OCPVersion" + + // RHCOSVersion represents a configuration mode used to define the RHCOSVersion. + ClusterBootImageSpecModeRHCOSVersion ClusterBootImageManualMode = "RHCOSVersion" +) + +// BootImageSkewEnforcementStatus is the type for the status object. It represents the cluster defaults when +// the boot image skew enforcement configuration is undefined and reflects the actual configuration when it is defined. +// +kubebuilder:validation:XValidation:rule="has(self.mode) && (self.mode == 'Automatic') ? has(self.automatic) : !has(self.automatic)",message="automatic is required when mode is Automatic, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="has(self.mode) && (self.mode == 'Manual') ? 
has(self.manual) : !has(self.manual)",message="manual is required when mode is Manual, and forbidden otherwise" +// +union +type BootImageSkewEnforcementStatus struct { + // mode determines the underlying behavior of skew enforcement mechanism. + // Valid values are Automatic, Manual and None. + // Automatic means that the MCO will perform boot image updates and store the + // OCP & RHCOS version associated with the last boot image update in the automatic field. + // Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + // & RHCOS version associated with the last boot image update in the manual field. + // In Automatic and Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + // skew limit described by the release image. + // None means that the MCO will no longer monitor the boot image skew. This may affect + // the cluster's ability to scale. + // This field is required. + // +unionDiscriminator + // +required + Mode BootImageSkewEnforcementModeStatus `json:"mode,omitempty"` + + // automatic describes the current boot image of the cluster. + // This will be populated by the MCO when performing boot image updates. This value will be compared against + // the cluster's skew limit to determine skew compliance. + // Required when mode is set to "Automatic" and forbidden otherwise. + // +optional + Automatic ClusterBootImageAutomatic `json:"automatic,omitempty,omitzero"` + + // manual describes the current boot image of the cluster. + // This will be populated by the MCO using the values provided in the spec.bootImageSkewEnforcement.manual field. + // This value will be compared against the cluster's skew limit to determine skew compliance. + // Required when mode is set to "Manual" and forbidden otherwise. + // +optional + Manual ClusterBootImageManual `json:"manual,omitempty,omitzero"` +} + +// ClusterBootImageAutomatic is used to describe the cluster boot image in Automatic mode. It stores the RHCOS version of the +// boot image and the OCP release version which shipped with that RHCOS boot image. At least one of these values are required. +// If ocpVersion and rhcosVersion are defined, both values will be used for checking skew compliance. +// If only ocpVersion is defined, only that value will be used for checking skew compliance. +// If only rhcosVersion is defined, only that value will be used for checking skew compliance. +// +kubebuilder:validation:XValidation:rule="has(self.ocpVersion) || has(self.rhcosVersion)",message="at least one of ocpVersion or rhcosVersion is required" +// +kubebuilder:validation:MinProperties=1 +type ClusterBootImageAutomatic struct { + // ocpVersion provides a string which represents the OCP version of the boot image. + // This field must match the OCP semver compatible format of x.y.z. This field must be between + // 5 and 10 characters long. + // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]+\\\\.[0-9]+\\\\.[0-9]+$')",message="ocpVersion must match the OCP semver compatible format of x.y.z" + // +kubebuilder:validation:MaxLength:=10 + // +kubebuilder:validation:MinLength:=5 + // +optional + OCPVersion string `json:"ocpVersion,omitempty"` + + // rhcosVersion provides a string which represents the RHCOS version of the boot image + // This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + // format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. 
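Tying the Manual skew-enforcement pieces together, the sketch below shows a spec fragment that records the cluster's boot image by OCP release version. It uses only types and constants introduced in this hunk (the BootImageSkewEnforcementConfigMode constants are declared a little further below); the version string is an assumed example.

// Sketch only: declaring the cluster's boot image manually so the MCO can
// evaluate skew against the release payload.
package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
)

func manualSkewEnforcement() operatorv1.MachineConfigurationSpec {
	return operatorv1.MachineConfigurationSpec{
		BootImageSkewEnforcement: operatorv1.BootImageSkewEnforcementConfig{
			Mode: operatorv1.BootImageSkewEnforcementConfigModeManual,
			Manual: operatorv1.ClusterBootImageManual{
				// Record the OCP release that shipped the oldest boot image in use.
				Mode:       operatorv1.ClusterBootImageSpecModeOCPVersion,
				OCPVersion: "4.19.0", // must match x.y.z and be 5 to 10 characters long
			},
		},
	}
}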
This field must be between + // 14 and 21 characters long. + // +kubebuilder:validation:XValidation:rule="self.matches('^[0-9]+\\\\.[0-9]+\\\\.([0-9]{8}|[0-9]{12})-[0-9]+$')",message="rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]" + // +kubebuilder:validation:MaxLength:=21 + // +kubebuilder:validation:MinLength:=14 + // +optional + RHCOSVersion string `json:"rhcosVersion,omitempty"` } +// BootImageSkewEnforcementConfigMode is a string enum used to configure the cluster's boot image skew enforcement mode. +// +kubebuilder:validation:Enum:="Manual";"None" +type BootImageSkewEnforcementConfigMode string + +const ( + // Manual represents a configuration mode that allows manual skew enforcement. + BootImageSkewEnforcementConfigModeManual BootImageSkewEnforcementConfigMode = "Manual" + + // None represents a configuration mode that disables boot image skew enforcement. + BootImageSkewEnforcementConfigModeNone BootImageSkewEnforcementConfigMode = "None" +) + +// BootImageSkewEnforcementModeStatus is a string enum used to indicate the cluster's boot image skew enforcement mode. +// +kubebuilder:validation:Enum:="Automatic";"Manual";"None" +type BootImageSkewEnforcementModeStatus string + +const ( + // Automatic represents a configuration mode that allows automatic skew enforcement. + BootImageSkewEnforcementModeStatusAutomatic BootImageSkewEnforcementModeStatus = "Automatic" + + // Manual represents a configuration mode that allows manual skew enforcement. + BootImageSkewEnforcementModeStatusManual BootImageSkewEnforcementModeStatus = "Manual" + + // None represents a configuration mode that disables boot image skew enforcement. + BootImageSkewEnforcementModeStatusNone BootImageSkewEnforcementModeStatus = "None" +) + type MachineConfigurationStatus struct { // observedGeneration is the last generation change you've dealt with // +optional @@ -101,6 +290,16 @@ type MachineConfigurationStatus struct { // +openshift:enable:FeatureGate=ManagedBootImages // +optional ManagedBootImagesStatus ManagedBootImages `json:"managedBootImagesStatus"` + + // bootImageSkewEnforcementStatus reflects what the latest cluster-validated boot image skew enforcement + // configuration is and will be used by Machine Config Controller while performing boot image skew enforcement. + // When omitted, the MCO has no knowledge of how to enforce boot image skew. When the MCO does not know how + // boot image skew should be enforced, cluster upgrades will be blocked until it can either automatically + // determine skew enforcement or there is an explicit skew enforcement configuration provided in the + // spec.bootImageSkewEnforcement field. + // +openshift:enable:FeatureGate=BootImageSkewEnforcement + // +optional + BootImageSkewEnforcementStatus BootImageSkewEnforcementStatus `json:"bootImageSkewEnforcementStatus,omitempty,omitzero"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -120,6 +319,40 @@ type MachineConfigurationList struct { Items []MachineConfiguration `json:"items"` } +// IrreconcilableValidationOverridesStorage defines available storage irreconcilable overrides. +// +kubebuilder:validation:Enum=Disks;FileSystems;Raid +type IrreconcilableValidationOverridesStorage string + +const ( + // Disks enables changes to the `spec.config.storage.disks` section of MachineConfig CRs. 
+ IrreconcilableValidationOverridesStorageDisks IrreconcilableValidationOverridesStorage = "Disks" + + // FileSystems enables changes to the `spec.config.storage.filesystems` section of MachineConfig CRs. + IrreconcilableValidationOverridesStorageFileSystems IrreconcilableValidationOverridesStorage = "FileSystems" + + // Raid enables changes to the `spec.config.storage.raid` section of MachineConfig CRs. + IrreconcilableValidationOverridesStorageRaid IrreconcilableValidationOverridesStorage = "Raid" +) + +// IrreconcilableValidationOverrides holds the irreconcilable validations overrides to be applied on each rendered +// MachineConfig generation. +// +kubebuilder:validation:MinProperties=1 +type IrreconcilableValidationOverrides struct { + // storage can be used to allow making irreconcilable changes to the selected sections under the + // `spec.config.storage` field of MachineConfig CRs + // It must have at least one item, may not exceed 3 items and must not contain duplicates. + // Allowed element values are "Disks", "FileSystems", "Raid" and omitted. + // When contains "Disks" changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. + // When contains "FileSystems" changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. + // When contains "Raid" changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. + // When omitted changes to the `spec.config.storage` section are forbidden. + // +optional + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=3 + Storage []IrreconcilableValidationOverridesStorage `json:"storage,omitempty,omitzero"` +} + type ManagedBootImages struct { // machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator // will watch for changes to this list. Only one entry is permitted per type of machine management resource. @@ -133,10 +366,12 @@ type ManagedBootImages struct { // MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information // such as the resource type and the API Group of the resource. It also provides granular control via the selection field. +// +openshift:validation:FeatureGateAwareXValidation:requiredFeatureGate=ManagedBootImages;ManagedBootImagesCPMS,rule="self.resource != 'controlplanemachinesets' || self.selection.mode == 'All' || self.selection.mode == 'None'", message="Only All or None selection mode is permitted for ControlPlaneMachineSets" type MachineManager struct { // resource is the machine management resource's type. - // The only current valid value is machinesets. + // Valid values are machinesets and controlplanemachinesets. // machinesets means that the machine manager will only register resources of the kind MachineSet. + // controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. // +required Resource MachineManagerMachineSetsResourceType `json:"resource"` @@ -155,9 +390,10 @@ type MachineManager struct { // +union type MachineManagerSelector struct { // mode determines how machine managers will be selected for updates. - // Valid values are All and Partial. + // Valid values are All, Partial and None. // All means that every resource matched by the machine manager will be updated. // Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. 
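The remaining additions in this file, the irreconcilable validation overrides and the controlplanemachinesets resource type, can be combined as in the sketch below. The override constants and the ControlPlaneMachineSets constant are the ones added in this hunk (the latter appears just below); the MachineManager field names and the "All" selection-mode literal are assumptions based on the existing API, and per the validation added above controlplanemachinesets only accepts the All or None selection modes.

// Sketch only: allowing irreconcilable storage changes and registering the
// ControlPlaneMachineSet for boot image updates.
package example

import (
	operatorv1 "github.com/openshift/api/operator/v1"
)

func overridesAndCPMSBootImages() operatorv1.MachineConfigurationSpec {
	return operatorv1.MachineConfigurationSpec{
		IrreconcilableValidationOverrides: operatorv1.IrreconcilableValidationOverrides{
			// Permit disk and filesystem changes; they only apply to newly provisioned nodes.
			Storage: []operatorv1.IrreconcilableValidationOverridesStorage{
				operatorv1.IrreconcilableValidationOverridesStorageDisks,
				operatorv1.IrreconcilableValidationOverridesStorageFileSystems,
			},
		},
		ManagedBootImages: operatorv1.ManagedBootImages{
			MachineManagers: []operatorv1.MachineManager{
				{
					Resource: operatorv1.ControlPlaneMachineSets, // "controlplanemachinesets"
					APIGroup: operatorv1.MachineAPI,
					Selection: operatorv1.MachineManagerSelector{
						// Partial is not permitted for controlplanemachinesets;
						// the constant name for "All" is assumed, not shown here.
						Mode: operatorv1.MachineManagerSelectorMode("All"),
					},
				},
			},
		},
	}
}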
+ // Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. // None means that every resource matched by the machine manager will not be updated. // +unionDiscriminator // +required @@ -194,12 +430,15 @@ const ( // MachineManagerManagedResourceType is a string enum used in the MachineManager type to describe the resource // type to be registered. -// +kubebuilder:validation:Enum:="machinesets" +// +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=ManagedBootImages,enum=machinesets +// +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=ManagedBootImages;ManagedBootImagesCPMS,enum=machinesets;controlplanemachinesets type MachineManagerMachineSetsResourceType string const ( // MachineSets represent the MachineSet resource type, which manage a group of machines and belong to the Openshift machine API group. MachineSets MachineManagerMachineSetsResourceType = "machinesets" + // ControlPlaneMachineSets represent the ControlPlaneMachineSets resource type, which manage a group of control-plane machines and belong to the Openshift machine API group. + ControlPlaneMachineSets MachineManagerMachineSetsResourceType = "controlplanemachinesets" ) // MachineManagerManagedAPIGroupType is a string enum used in in the MachineManager type to describe the APIGroup @@ -209,7 +448,7 @@ type MachineManagerMachineSetsAPIGroupType string const ( // MachineAPI represent the traditional MAPI Group that a machineset may belong to. - // This feature only supports MAPI machinesets at this time. + // This feature only supports MAPI machinesets and controlplanemachinesets at this time. MachineAPI MachineManagerMachineSetsAPIGroupType = "machine.openshift.io" ) diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..ba2587dc9 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-CustomNoUpgrade.crd.yaml @@ -0,0 +1,349 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeAPIServer + listKind: KubeAPIServerList + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + KubeAPIServer provides information to configure an operator to manage kube-apiserver. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes API Server + properties: + eventTTLMinutes: + description: |- + eventTTLMinutes specifies the amount of time that the events are stored before being deleted. + The TTL is allowed between 5 minutes minimum up to a maximum of 180 minutes (3 hours). + + Lowering this value will reduce the storage required in etcd. Note that this setting will only apply + to new events being created and will not update existing events. + + When omitted this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. + The current default value is 3h (180 minutes). + format: int32 + maximum: 180 + minimum: 5 + type: integer + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. 
+ Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + type: string + reason: + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + required: + - group + - name + - namespace + - resource + type: object + type: array + x-kubernetes-list-map-keys: + - group + - resource + - namespace + - name + x-kubernetes-list-type: map + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: |- + currentRevision is the generation of the most recently successful deployment. + Can not be set on creation of a nodeStatus. Updates must only increase the value. 
+ format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + lastFailedCount: + description: lastFailedCount is how often the installer pod + of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure + reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable + errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + x-kubernetes-list-type: atomic + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a + previous revision happened. + type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: |- + targetRevision is the generation of the deployment we're trying to apply. + Can not be set on creation of a nodeStatus. + format: int32 + type: integer + required: + - nodeName + type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) + - fieldPath: .currentRevision + message: currentRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.currentRevision) + - fieldPath: .targetRevision + message: targetRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.targetRevision) + type: array + x-kubernetes-list-map-keys: + - nodeName + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + serviceAccountIssuers: + description: |- + serviceAccountIssuers tracks history of used service account issuers. + The item without expiration time represents the currently used service account issuer. + The other items represents service account issuers that were used previously and are still being trusted. + The default expiration for the items is set by the platform and it defaults to 24h. + see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection + items: + properties: + expirationTime: + description: |- + expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list + of service account issuers. 
+ format: date-time + type: string + name: + description: name is the name of the service account issuer + type: string + type: object + type: array + x-kubernetes-list-type: atomic + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-Default.crd.yaml index a8664362d..3e7c51dc6 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: api.openshift.io/merged-by-featuregates: "true" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default name: kubeapiservers.operator.openshift.io spec: group: operator.openshift.io @@ -320,6 +321,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic version: description: version is the level this availability applies to type: string diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..b0d11038d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,349 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeAPIServer + listKind: KubeAPIServerList + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + KubeAPIServer provides information to configure an operator to manage kube-apiserver. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes API Server + properties: + eventTTLMinutes: + description: |- + eventTTLMinutes specifies the amount of time that the events are stored before being deleted. + The TTL is allowed between 5 minutes minimum up to a maximum of 180 minutes (3 hours). + + Lowering this value will reduce the storage required in etcd. Note that this setting will only apply + to new events being created and will not update existing events. + + When omitted this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. + The current default value is 3h (180 minutes). + format: int32 + maximum: 180 + minimum: 5 + type: integer + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. 
+ Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + type: string + reason: + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + required: + - group + - name + - namespace + - resource + type: object + type: array + x-kubernetes-list-map-keys: + - group + - resource + - namespace + - name + x-kubernetes-list-type: map + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: |- + currentRevision is the generation of the most recently successful deployment. + Can not be set on creation of a nodeStatus. Updates must only increase the value. 
+ format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + lastFailedCount: + description: lastFailedCount is how often the installer pod + of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure + reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable + errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + x-kubernetes-list-type: atomic + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a + previous revision happened. + type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: |- + targetRevision is the generation of the deployment we're trying to apply. + Can not be set on creation of a nodeStatus. + format: int32 + type: integer + required: + - nodeName + type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) + - fieldPath: .currentRevision + message: currentRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.currentRevision) + - fieldPath: .targetRevision + message: targetRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.targetRevision) + type: array + x-kubernetes-list-map-keys: + - nodeName + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + serviceAccountIssuers: + description: |- + serviceAccountIssuers tracks history of used service account issuers. + The item without expiration time represents the currently used service account issuer. + The other items represents service account issuers that were used previously and are still being trusted. + The default expiration for the items is set by the platform and it defaults to 24h. + see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection + items: + properties: + expirationTime: + description: |- + expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list + of service account issuers. 
+ format: date-time + type: string + name: + description: name is the name of the service account issuer + type: string + type: object + type: array + x-kubernetes-list-type: atomic + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..48d0d6a70 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,349 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeAPIServer + listKind: KubeAPIServerList + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + KubeAPIServer provides information to configure an operator to manage kube-apiserver. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes API Server + properties: + eventTTLMinutes: + description: |- + eventTTLMinutes specifies the amount of time that the events are stored before being deleted. + The TTL is allowed between 5 minutes minimum up to a maximum of 180 minutes (3 hours). + + Lowering this value will reduce the storage required in etcd. Note that this setting will only apply + to new events being created and will not update existing events. + + When omitted this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. + The current default value is 3h (180 minutes). 
+ format: int32 + maximum: 180 + minimum: 5 + type: integer + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. + Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + type: string + reason: + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + required: + - group + - name + - namespace + - resource + type: object + type: array + x-kubernetes-list-map-keys: + - group + - resource + - namespace + - name + x-kubernetes-list-type: map + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: |- + currentRevision is the generation of the most recently successful deployment. + Can not be set on creation of a nodeStatus. Updates must only increase the value. + format: int32 + type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf + lastFailedCount: + description: lastFailedCount is how often the installer pod + of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure + reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable + errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + x-kubernetes-list-type: atomic + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a + previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: |- + targetRevision is the generation of the deployment we're trying to apply. + Can not be set on creation of a nodeStatus. + format: int32 + type: integer + required: + - nodeName + type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) + - fieldPath: .currentRevision + message: currentRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.currentRevision) + - fieldPath: .targetRevision + message: targetRevision can not be set on creation of a nodeStatus + optionalOldSelf: true + rule: oldSelf.hasValue() || !has(self.targetRevision) + type: array + x-kubernetes-list-map-keys: + - nodeName + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + serviceAccountIssuers: + description: |- + serviceAccountIssuers tracks history of used service account issuers. + The item without expiration time represents the currently used service account issuer. + The other items represents service account issuers that were used previously and are still being trusted. + The default expiration for the items is set by the platform and it defaults to 24h. + see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection + items: + properties: + expirationTime: + description: |- + expirationTime is the time after which this service account issuer will be pruned and removed from the trusted list + of service account issuers. + format: date-time + type: string + name: + description: name is the name of the service account issuer + type: string + type: object + type: array + x-kubernetes-list-type: atomic + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml index dfd9e3ddb..a18cf575e 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml @@ -480,11 +480,9 @@ spec: the Authorizer interface properties: fieldSelector: - description: |- - fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: fieldSelector describes the limitation + on access based on field. It can only limit + access, not broaden it. properties: rawSelector: description: |- @@ -536,11 +534,9 @@ spec: Resource. 
"*" means all. type: string labelSelector: - description: |- - labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: labelSelector describes the limitation + on access based on labels. It can only + limit access, not broaden it. properties: rawSelector: description: |- @@ -631,11 +627,9 @@ spec: the Authorizer interface properties: fieldSelector: - description: |- - fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: fieldSelector describes the limitation + on access based on field. It can only limit + access, not broaden it. properties: rawSelector: description: |- @@ -687,11 +681,9 @@ spec: Resource. "*" means all. type: string labelSelector: - description: |- - labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: labelSelector describes the limitation + on access based on labels. It can only + limit access, not broaden it. properties: rawSelector: description: |- diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..c165fca6b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-CustomNoUpgrade.crd.yaml @@ -0,0 +1,1543 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + listKind: MachineConfigurationList + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfiguration provides information to configure an operator to manage Machine Configuration. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Machine Config Operator + properties: + bootImageSkewEnforcement: + description: |- + bootImageSkewEnforcement allows an admin to configure how boot image version skew is + enforced on the cluster. + When omitted, this will default to Automatic for clusters that support automatic boot image updates. + For clusters that do not support automatic boot image updates, cluster upgrades will be disabled until + a skew enforcement mode has been specified. + When version skew is being enforced, cluster upgrades will be disabled until the version skew is deemed + acceptable for the current release payload. + properties: + manual: + description: |- + manual describes the current boot image of the cluster. + This should be set to the oldest boot image used amongst all machine resources in the cluster. + This must include either the RHCOS version of the boot image or the OCP release version which shipped with that + RHCOS boot image. + Required when mode is set to "Manual" and forbidden otherwise. + properties: + mode: + description: |- + mode is used to configure which boot image field is defined in Manual mode. + Valid values are OCPVersion and RHCOSVersion. + OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update + in the OCPVersion field. + RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update + in the RHCOSVersion field. + This field is required. + enum: + - OCPVersion + - RHCOSVersion + type: string + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + Required when mode is set to "OCPVersion" and forbidden otherwise. + maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + Required when mode is set to "RHCOSVersion" and forbidden otherwise. + maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + required: + - mode + type: object + x-kubernetes-validations: + - message: ocpVersion is required when mode is OCPVersion, and + forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''OCPVersion'') ? has(self.ocpVersion) + : !has(self.ocpVersion)' + - message: rhcosVersion is required when mode is RHCOSVersion, + and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''RHCOSVersion'') ? 
has(self.rhcosVersion) + : !has(self.rhcosVersion)' + mode: + description: |- + mode determines the underlying behavior of skew enforcement mechanism. + Valid values are Manual and None. + Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + & RHCOS version associated with the last boot image update in the manual field. + In Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + skew limit described by the release image. + None means that the MCO will no longer monitor the boot image skew. This may affect + the cluster's ability to scale. + This field is required. + enum: + - Manual + - None + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: manual is required when mode is Manual, and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''Manual'') ? has(self.manual) + : !has(self.manual)' + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + irreconcilableValidationOverrides: + description: |- + irreconcilableValidationOverrides is an optional field that can used to make changes to a MachineConfig that + cannot be applied to existing nodes. + When specified, the fields configured with validation overrides will no longer reject changes to those + respective fields due to them not being able to be applied to existing nodes. + Only newly provisioned nodes will have these configurations applied. + Existing nodes will report observed configuration differences in their MachineConfigNode status. + minProperties: 1 + properties: + storage: + description: |- + storage can be used to allow making irreconcilable changes to the selected sections under the + `spec.config.storage` field of MachineConfig CRs + It must have at least one item, may not exceed 3 items and must not contain duplicates. + Allowed element values are "Disks", "FileSystems", "Raid" and omitted. + When contains "Disks" changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. + When contains "FileSystems" changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. + When contains "Raid" changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. + When omitted changes to the `spec.config.storage` section are forbidden. + items: + description: IrreconcilableValidationOverridesStorage defines + available storage irreconcilable overrides. + enum: + - Disks + - FileSystems + - Raid + type: string + maxItems: 3 + minItems: 1 + type: array + x-kubernetes-list-type: set + type: object + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". 
+ enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managedBootImages: + description: |- + managedBootImages allows configuration for the management of boot images for machine + resources within the cluster. This configuration allows users to select resources that should + be updated to the latest boot images during cluster upgrades, ensuring that new machines + always boot with the current cluster version's boot image. When omitted, this means no opinion + and the platform is left to choose a reasonable default, which is subject to change over time. + The default for each machine manager mode is All for GCP and AWS platforms, and None for all + other platforms. + properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. + machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. + enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeDisruptionPolicy: + description: |- + nodeDisruptionPolicy allows an admin to set granular node disruption actions for + MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow + for less downtime when doing small configuration updates to the cluster. This configuration + has no effect on cluster upgrades which will still incur node disruption where required. + properties: + files: + description: |- + files is a list of MachineConfig file definitions and actions to take to changes on those paths + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecFile is a file entry and + corresponding actions to take and is used in the NodeDisruptionPolicyConfig + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. 
crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? 
+ has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. + type: string + required: + - actions + - path + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: |- + sshkey maps to the ignition.sshkeys field in the MachineConfig object, definition an action for this + will apply to all sshkey changes in the cluster + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". 
+ ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? has(self.restart) + : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) == + 1 : true' + - message: None action can only be specified standalone, as + it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + required: + - actions + type: object + units: + description: |- + units is a list MachineConfig unit definitions and actions to take on changes to those services + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecUnit is a systemd unit + name and corresponding actions to take and is used in the + NodeDisruptionPolicyConfig object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. 
+ ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? 
size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected format + is ${NAME}${SERVICETYPE}, where {NAME} must be atleast + 1 character long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. + Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Machine + Config Operator + properties: + bootImageSkewEnforcementStatus: + description: |- + bootImageSkewEnforcementStatus reflects what the latest cluster-validated boot image skew enforcement + configuration is and will be used by Machine Config Controller while performing boot image skew enforcement. 
+ When omitted, the MCO has no knowledge of how to enforce boot image skew. When the MCO does not know how + boot image skew should be enforced, cluster upgrades will be blocked until it can either automatically + determine skew enforcement or there is an explicit skew enforcement configuration provided in the + spec.bootImageSkewEnforcement field. + properties: + automatic: + description: |- + automatic describes the current boot image of the cluster. + This will be populated by the MCO when performing boot image updates. This value will be compared against + the cluster's skew limit to determine skew compliance. + Required when mode is set to "Automatic" and forbidden otherwise. + minProperties: 1 + properties: + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + type: object + x-kubernetes-validations: + - message: at least one of ocpVersion or rhcosVersion is required + rule: has(self.ocpVersion) || has(self.rhcosVersion) + manual: + description: |- + manual describes the current boot image of the cluster. + This will be populated by the MCO using the values provided in the spec.bootImageSkewEnforcement.manual field. + This value will be compared against the cluster's skew limit to determine skew compliance. + Required when mode is set to "Manual" and forbidden otherwise. + properties: + mode: + description: |- + mode is used to configure which boot image field is defined in Manual mode. + Valid values are OCPVersion and RHCOSVersion. + OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update + in the OCPVersion field. + RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update + in the RHCOSVersion field. + This field is required. + enum: + - OCPVersion + - RHCOSVersion + type: string + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + Required when mode is set to "OCPVersion" and forbidden otherwise. 
+ maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + Required when mode is set to "RHCOSVersion" and forbidden otherwise. + maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + required: + - mode + type: object + x-kubernetes-validations: + - message: ocpVersion is required when mode is OCPVersion, and + forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''OCPVersion'') ? has(self.ocpVersion) + : !has(self.ocpVersion)' + - message: rhcosVersion is required when mode is RHCOSVersion, + and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''RHCOSVersion'') ? has(self.rhcosVersion) + : !has(self.rhcosVersion)' + mode: + description: |- + mode determines the underlying behavior of skew enforcement mechanism. + Valid values are Automatic, Manual and None. + Automatic means that the MCO will perform boot image updates and store the + OCP & RHCOS version associated with the last boot image update in the automatic field. + Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + & RHCOS version associated with the last boot image update in the manual field. + In Automatic and Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + skew limit described by the release image. + None means that the MCO will no longer monitor the boot image skew. This may affect + the cluster's ability to scale. + This field is required. + enum: + - Automatic + - Manual + - None + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: automatic is required when mode is Automatic, and forbidden + otherwise + rule: 'has(self.mode) && (self.mode == ''Automatic'') ? has(self.automatic) + : !has(self.automatic)' + - message: manual is required when mode is Manual, and forbidden otherwise + rule: 'has(self.mode) && (self.mode == ''Manual'') ? has(self.manual) + : !has(self.manual)' + conditions: + description: conditions is a list of conditions and their status + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + managedBootImagesStatus: + description: |- + managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is + and will be used by Machine Config Controller while performing boot image updates. + properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. + machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. 
+ enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + nodeDisruptionPolicyStatus: + description: |- + nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, + and will be used by the Machine Config Daemon during future node updates. + properties: + clusterPolicies: + description: clusterPolicies is a merge of cluster default and + user provided node disruption policies. + properties: + files: + description: files is a list of MachineConfig file definitions + and actions to take to changes on those paths + items: + description: NodeDisruptionPolicyStatusFile is a file entry + and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. 
+ Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. 
+ Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. + type: string + required: + - actions + - path + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: sshkey is the overall sshkey MachineConfig definition + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
+ Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? + has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + required: + - actions + type: object + units: + description: units is a list MachineConfig unit definitions + and actions to take on changes to those services + items: + description: NodeDisruptionPolicyStatusUnit is a systemd + unit name and corresponding actions to take and is used + in the NodeDisruptionPolicyClusterStatus object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. 
If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. 
+ Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", + ".mount", ".automount", ".swap", ".target", ".path", + ".timer",".snapshot", ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: object + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: when skew enforcement is in Automatic mode, a boot image configuration + is required + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? self.?spec.managedBootImages.hasValue() || self.?status.managedBootImagesStatus.hasValue() + : true' + - message: when skew enforcement is in Automatic mode, managedBootImages must + contain a MachineManager opting in all MachineAPI MachineSets + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? 
!(self.?spec.managedBootImages.machineManagers.hasValue()) || self.spec.managedBootImages.machineManagers.exists(m, + m.selection.mode == ''All'' && m.resource == ''machinesets'' && m.apiGroup + == ''machine.openshift.io'') : true' + - message: when skew enforcement is in Automatic mode, managedBootImagesStatus + must contain a MachineManager opting in all MachineAPI MachineSets + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? !(self.?status.managedBootImagesStatus.machineManagers.hasValue()) || + self.status.managedBootImagesStatus.machineManagers.exists(m, m.selection.mode + == ''All'' && m.resource == ''machinesets'' && m.apiGroup == ''machine.openshift.io''): + true' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml similarity index 98% rename from vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml index 7976d1dab..3de28dcdf 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: api.openshift.io/merged-by-featuregates: "true" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default name: machineconfigurations.operator.openshift.io spec: group: operator.openshift.io @@ -102,8 +103,9 @@ spec: resource: description: |- resource is the machine management resource's type. - The only current valid value is machinesets. + Valid values are machinesets and controlplanemachinesets. machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. enum: - machinesets type: string @@ -115,9 +117,10 @@ spec: mode: description: |- mode determines how machine managers will be selected for updates. - Valid values are All and Partial. + Valid values are All, Partial and None. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. None means that every resource matched by the machine manager will not be updated. enum: - All @@ -733,8 +736,9 @@ spec: resource: description: |- resource is the machine management resource's type. - The only current valid value is machinesets. + Valid values are machinesets and controlplanemachinesets. machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. 
enum: - machinesets type: string @@ -746,9 +750,10 @@ spec: mode: description: |- mode determines how machine managers will be selected for updates. - Valid values are All and Partial. + Valid values are All, Partial and None. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. None means that every resource matched by the machine manager will not be updated. enum: - All diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..544168f41 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,1543 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + listKind: MachineConfigurationList + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfiguration provides information to configure an operator to manage Machine Configuration. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Machine Config Operator + properties: + bootImageSkewEnforcement: + description: |- + bootImageSkewEnforcement allows an admin to configure how boot image version skew is + enforced on the cluster. + When omitted, this will default to Automatic for clusters that support automatic boot image updates. + For clusters that do not support automatic boot image updates, cluster upgrades will be disabled until + a skew enforcement mode has been specified. 
+ When version skew is being enforced, cluster upgrades will be disabled until the version skew is deemed + acceptable for the current release payload. + properties: + manual: + description: |- + manual describes the current boot image of the cluster. + This should be set to the oldest boot image used amongst all machine resources in the cluster. + This must include either the RHCOS version of the boot image or the OCP release version which shipped with that + RHCOS boot image. + Required when mode is set to "Manual" and forbidden otherwise. + properties: + mode: + description: |- + mode is used to configure which boot image field is defined in Manual mode. + Valid values are OCPVersion and RHCOSVersion. + OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update + in the OCPVersion field. + RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update + in the RHCOSVersion field. + This field is required. + enum: + - OCPVersion + - RHCOSVersion + type: string + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + Required when mode is set to "OCPVersion" and forbidden otherwise. + maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + Required when mode is set to "RHCOSVersion" and forbidden otherwise. + maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + required: + - mode + type: object + x-kubernetes-validations: + - message: ocpVersion is required when mode is OCPVersion, and + forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''OCPVersion'') ? has(self.ocpVersion) + : !has(self.ocpVersion)' + - message: rhcosVersion is required when mode is RHCOSVersion, + and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''RHCOSVersion'') ? has(self.rhcosVersion) + : !has(self.rhcosVersion)' + mode: + description: |- + mode determines the underlying behavior of skew enforcement mechanism. + Valid values are Manual and None. + Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + & RHCOS version associated with the last boot image update in the manual field. + In Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + skew limit described by the release image. + None means that the MCO will no longer monitor the boot image skew. This may affect + the cluster's ability to scale. + This field is required. 
+ enum: + - Manual + - None + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: manual is required when mode is Manual, and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''Manual'') ? has(self.manual) + : !has(self.manual)' + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + irreconcilableValidationOverrides: + description: |- + irreconcilableValidationOverrides is an optional field that can used to make changes to a MachineConfig that + cannot be applied to existing nodes. + When specified, the fields configured with validation overrides will no longer reject changes to those + respective fields due to them not being able to be applied to existing nodes. + Only newly provisioned nodes will have these configurations applied. + Existing nodes will report observed configuration differences in their MachineConfigNode status. + minProperties: 1 + properties: + storage: + description: |- + storage can be used to allow making irreconcilable changes to the selected sections under the + `spec.config.storage` field of MachineConfig CRs + It must have at least one item, may not exceed 3 items and must not contain duplicates. + Allowed element values are "Disks", "FileSystems", "Raid" and omitted. + When contains "Disks" changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. + When contains "FileSystems" changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. + When contains "Raid" changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. + When omitted changes to the `spec.config.storage` section are forbidden. + items: + description: IrreconcilableValidationOverridesStorage defines + available storage irreconcilable overrides. + enum: + - Disks + - FileSystems + - Raid + type: string + maxItems: 3 + minItems: 1 + type: array + x-kubernetes-list-type: set + type: object + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managedBootImages: + description: |- + managedBootImages allows configuration for the management of boot images for machine + resources within the cluster. This configuration allows users to select resources that should + be updated to the latest boot images during cluster upgrades, ensuring that new machines + always boot with the current cluster version's boot image. When omitted, this means no opinion + and the platform is left to choose a reasonable default, which is subject to change over time. + The default for each machine manager mode is All for GCP and AWS platforms, and None for all + other platforms. 
+ properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. + machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. + enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeDisruptionPolicy: + description: |- + nodeDisruptionPolicy allows an admin to set granular node disruption actions for + MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow + for less downtime when doing small configuration updates to the cluster. This configuration + has no effect on cluster upgrades which will still incur node disruption where required. + properties: + files: + description: |- + files is a list of MachineConfig file definitions and actions to take to changes on those paths + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecFile is a file entry and + corresponding actions to take and is used in the NodeDisruptionPolicyConfig + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. 
+ Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. 
+ type: string + required: + - actions + - path + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: |- + sshkey maps to the ignition.sshkeys field in the MachineConfig object, definition an action for this + will apply to all sshkey changes in the cluster + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? has(self.restart) + : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) == + 1 : true' + - message: None action can only be specified standalone, as + it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + required: + - actions + type: object + units: + description: |- + units is a list MachineConfig unit definitions and actions to take on changes to those services + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecUnit is a systemd unit + name and corresponding actions to take and is used in the + NodeDisruptionPolicyConfig object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". 
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". 
+ ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected format + is ${NAME}${SERVICETYPE}, where {NAME} must be atleast + 1 character long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. + Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Machine + Config Operator + properties: + bootImageSkewEnforcementStatus: + description: |- + bootImageSkewEnforcementStatus reflects what the latest cluster-validated boot image skew enforcement + configuration is and will be used by Machine Config Controller while performing boot image skew enforcement. + When omitted, the MCO has no knowledge of how to enforce boot image skew. When the MCO does not know how + boot image skew should be enforced, cluster upgrades will be blocked until it can either automatically + determine skew enforcement or there is an explicit skew enforcement configuration provided in the + spec.bootImageSkewEnforcement field. + properties: + automatic: + description: |- + automatic describes the current boot image of the cluster. + This will be populated by the MCO when performing boot image updates. This value will be compared against + the cluster's skew limit to determine skew compliance. 
+ Required when mode is set to "Automatic" and forbidden otherwise. + minProperties: 1 + properties: + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + type: object + x-kubernetes-validations: + - message: at least one of ocpVersion or rhcosVersion is required + rule: has(self.ocpVersion) || has(self.rhcosVersion) + manual: + description: |- + manual describes the current boot image of the cluster. + This will be populated by the MCO using the values provided in the spec.bootImageSkewEnforcement.manual field. + This value will be compared against the cluster's skew limit to determine skew compliance. + Required when mode is set to "Manual" and forbidden otherwise. + properties: + mode: + description: |- + mode is used to configure which boot image field is defined in Manual mode. + Valid values are OCPVersion and RHCOSVersion. + OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update + in the OCPVersion field. + RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update + in the RHCOSVersion field. + This field is required. + enum: + - OCPVersion + - RHCOSVersion + type: string + ocpVersion: + description: |- + ocpVersion provides a string which represents the OCP version of the boot image. + This field must match the OCP semver compatible format of x.y.z. This field must be between + 5 and 10 characters long. + Required when mode is set to "OCPVersion" and forbidden otherwise. + maxLength: 10 + minLength: 5 + type: string + x-kubernetes-validations: + - message: ocpVersion must match the OCP semver compatible + format of x.y.z + rule: self.matches('^[0-9]+\\.[0-9]+\\.[0-9]+$') + rhcosVersion: + description: |- + rhcosVersion provides a string which represents the RHCOS version of the boot image + This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy + format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between + 14 and 21 characters long. + Required when mode is set to "RHCOSVersion" and forbidden otherwise. 
+ maxLength: 21 + minLength: 14 + type: string + x-kubernetes-validations: + - message: rhcosVersion must match format [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] + or must match legacy format [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber] + rule: self.matches('^[0-9]+\\.[0-9]+\\.([0-9]{8}|[0-9]{12})-[0-9]+$') + required: + - mode + type: object + x-kubernetes-validations: + - message: ocpVersion is required when mode is OCPVersion, and + forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''OCPVersion'') ? has(self.ocpVersion) + : !has(self.ocpVersion)' + - message: rhcosVersion is required when mode is RHCOSVersion, + and forbidden otherwise + rule: 'has(self.mode) && (self.mode ==''RHCOSVersion'') ? has(self.rhcosVersion) + : !has(self.rhcosVersion)' + mode: + description: |- + mode determines the underlying behavior of skew enforcement mechanism. + Valid values are Automatic, Manual and None. + Automatic means that the MCO will perform boot image updates and store the + OCP & RHCOS version associated with the last boot image update in the automatic field. + Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP + & RHCOS version associated with the last boot image update in the manual field. + In Automatic and Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the + skew limit described by the release image. + None means that the MCO will no longer monitor the boot image skew. This may affect + the cluster's ability to scale. + This field is required. + enum: + - Automatic + - Manual + - None + type: string + required: + - mode + type: object + x-kubernetes-validations: + - message: automatic is required when mode is Automatic, and forbidden + otherwise + rule: 'has(self.mode) && (self.mode == ''Automatic'') ? has(self.automatic) + : !has(self.automatic)' + - message: manual is required when mode is Manual, and forbidden otherwise + rule: 'has(self.mode) && (self.mode == ''Manual'') ? has(self.manual) + : !has(self.manual)' + conditions: + description: conditions is a list of conditions and their status + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + managedBootImagesStatus: + description: |- + managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is + and will be used by Machine Config Controller while performing boot image updates. + properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. + machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. + enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + nodeDisruptionPolicyStatus: + description: |- + nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, + and will be used by the Machine Config Daemon during future node updates. + properties: + clusterPolicies: + description: clusterPolicies is a merge of cluster default and + user provided node disruption policies. + properties: + files: + description: files is a list of MachineConfig file definitions + and actions to take to changes on those paths + items: + description: NodeDisruptionPolicyStatusFile is a file entry + and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. 
crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? 
has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. + type: string + required: + - actions + - path + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: sshkey is the overall sshkey MachineConfig definition + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". 
+ maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? + has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + required: + - actions + type: object + units: + description: units is a list MachineConfig unit definitions + and actions to take on changes to those services + items: + description: NodeDisruptionPolicyStatusUnit is a systemd + unit name and corresponding actions to take and is used + in the NodeDisruptionPolicyClusterStatus object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". 
+ ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? 
size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", + ".mount", ".automount", ".swap", ".target", ".path", + ".timer",".snapshot", ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: object + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: when skew enforcement is in Automatic mode, a boot image configuration + is required + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? self.?spec.managedBootImages.hasValue() || self.?status.managedBootImagesStatus.hasValue() + : true' + - message: when skew enforcement is in Automatic mode, managedBootImages must + contain a MachineManager opting in all MachineAPI MachineSets + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? !(self.?spec.managedBootImages.machineManagers.hasValue()) || self.spec.managedBootImages.machineManagers.exists(m, + m.selection.mode == ''All'' && m.resource == ''machinesets'' && m.apiGroup + == ''machine.openshift.io'') : true' + - message: when skew enforcement is in Automatic mode, managedBootImagesStatus + must contain a MachineManager opting in all MachineAPI MachineSets + rule: 'self.?status.bootImageSkewEnforcementStatus.mode.orValue("") == ''Automatic'' + ? 
!(self.?status.managedBootImagesStatus.machineManagers.hasValue()) || + self.status.managedBootImagesStatus.machineManagers.exists(m, m.selection.mode + == ''All'' && m.resource == ''machinesets'' && m.apiGroup == ''machine.openshift.io''): + true' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..14a864201 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,1298 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + listKind: MachineConfigurationList + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfiguration provides information to configure an operator to manage Machine Configuration. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Machine Config Operator + properties: + failedRevisionLimit: + description: |- + failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: |- + forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + this time instead of failing again on the same config. + type: string + irreconcilableValidationOverrides: + description: |- + irreconcilableValidationOverrides is an optional field that can used to make changes to a MachineConfig that + cannot be applied to existing nodes. 
+ When specified, the fields configured with validation overrides will no longer reject changes to those + respective fields due to them not being able to be applied to existing nodes. + Only newly provisioned nodes will have these configurations applied. + Existing nodes will report observed configuration differences in their MachineConfigNode status. + minProperties: 1 + properties: + storage: + description: |- + storage can be used to allow making irreconcilable changes to the selected sections under the + `spec.config.storage` field of MachineConfig CRs + It must have at least one item, may not exceed 3 items and must not contain duplicates. + Allowed element values are "Disks", "FileSystems", "Raid" and omitted. + When contains "Disks" changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. + When contains "FileSystems" changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. + When contains "Raid" changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. + When omitted changes to the `spec.config.storage` section are forbidden. + items: + description: IrreconcilableValidationOverridesStorage defines + available storage irreconcilable overrides. + enum: + - Disks + - FileSystems + - Raid + type: string + maxItems: 3 + minItems: 1 + type: array + x-kubernetes-list-type: set + type: object + logLevel: + default: Normal + description: |- + logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for their operands. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managedBootImages: + description: |- + managedBootImages allows configuration for the management of boot images for machine + resources within the cluster. This configuration allows users to select resources that should + be updated to the latest boot images during cluster upgrades, ensuring that new machines + always boot with the current cluster version's boot image. When omitted, this means no opinion + and the platform is left to choose a reasonable default, which is subject to change over time. + The default for each machine manager mode is All for GCP and AWS platforms, and None for all + other platforms. + properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. 
+ machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. + enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? 
has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeDisruptionPolicy: + description: |- + nodeDisruptionPolicy allows an admin to set granular node disruption actions for + MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow + for less downtime when doing small configuration updates to the cluster. This configuration + has no effect on cluster upgrades which will still incur node disruption where required. + properties: + files: + description: |- + files is a list of MachineConfig file definitions and actions to take to changes on those paths + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecFile is a file entry and + corresponding actions to take and is used in the NodeDisruptionPolicyConfig + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. 
+ type: string + required: + - actions + - path + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: |- + sshkey maps to the ignition.sshkeys field in the MachineConfig object, definition an action for this + will apply to all sshkey changes in the cluster + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? has(self.restart) + : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) == + 1 : true' + - message: None action can only be specified standalone, as + it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + required: + - actions + type: object + units: + description: |- + units is a list MachineConfig unit definitions and actions to take on changes to those services + This list supports a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecUnit is a systemd unit + name and corresponding actions to take and is used in the + NodeDisruptionPolicyConfig object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". 
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicySpecActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload and None. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". 
+ ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected format + is ${NAME}${SERVICETYPE}, where {NAME} must be atleast + 1 character long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + observedConfig: + description: |- + observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: |- + succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + -1 = unlimited, 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: |- + unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + Red Hat does not support the use of this field. + Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + Seek guidance from the Red Hat support before using this field. + Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Machine + Config Operator + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + managedBootImagesStatus: + description: |- + managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is + and will be used by Machine Config Controller while performing boot image updates. + properties: + machineManagers: + description: |- + machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted per type of machine management resource. + items: + description: |- + MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information + such as the resource type and the API Group of the resource. It also provides granular control via the selection field. + properties: + apiGroup: + description: |- + apiGroup is name of the APIGroup that the machine management resource belongs to. + The only current valid value is machine.openshift.io. + machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: |- + resource is the machine management resource's type. + Valid values are machinesets and controlplanemachinesets. + machinesets means that the machine manager will only register resources of the kind MachineSet. + controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet. + enum: + - machinesets + - controlplanemachinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: |- + mode determines how machine managers will be selected for updates. + Valid values are All, Partial and None. + All means that every resource matched by the machine manager will be updated. + Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. + Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. + None means that every resource matched by the machine manager will not be updated. 
+ enum: + - All + - Partial + - None + type: string + partial: + description: |- + partial provides label selector(s) that can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + x-kubernetes-validations: + - message: Only All or None selection mode is permitted for + ControlPlaneMachineSets + rule: self.resource != 'controlplanemachinesets' || self.selection.mode + == 'All' || self.selection.mode == 'None' + maxItems: 5 + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + nodeDisruptionPolicyStatus: + description: |- + nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, + and will be used by the Machine Config Daemon during future node updates. + properties: + clusterPolicies: + description: clusterPolicies is a merge of cluster default and + user provided node disruption policies. + properties: + files: + description: files is a list of MachineConfig file definitions + and actions to take to changes on those paths + items: + description: NodeDisruptionPolicyStatusFile is a file entry + and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus + object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. 
+ Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. 
+ Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + path: + description: |- + path is the location of a file being managed through a MachineConfig. + The Actions in the policy will apply to changes to the file at this path. + type: string + required: + - actions + - path + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: sshkey is the overall sshkey MachineConfig definition + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
+ Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. + Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? + has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + required: + - actions + type: object + units: + description: units is a list MachineConfig unit definitions + and actions to take on changes to those services + items: + description: NodeDisruptionPolicyStatusUnit is a systemd + unit name and corresponding actions to take and is used + in the NodeDisruptionPolicyClusterStatus object + properties: + actions: + description: |- + actions represents the series of commands to be executed on changes to the file at + the corresponding file path. Actions will be applied in the order that + they are set in this list. 
If there are other incoming changes to other MachineConfig + entries in the same update that require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: |- + serviceName is the full name (e.g. crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: |- + type represents the commands that will be carried out if this NodeDisruptionPolicyStatusActionType is executed + Valid values are Reboot, Drain, Reload, Restart, DaemonReload, None and Special. + reload/restart requires a corresponding service target specified in the reload/restart field. 
+ Other values require no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + name: + description: |- + name represents the service name of a systemd service managed through a MachineConfig + Actions specified will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. + ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". + ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", + ".mount", ".automount", ".swap", ".target", ".path", + ".timer",".snapshot", ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: object + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index d8f3cbc2f..fd83694c2 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -390,6 +390,41 @@ func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootImageSkewEnforcementConfig) DeepCopyInto(out *BootImageSkewEnforcementConfig) { + *out = *in + out.Manual = in.Manual + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootImageSkewEnforcementConfig. 
+func (in *BootImageSkewEnforcementConfig) DeepCopy() *BootImageSkewEnforcementConfig { + if in == nil { + return nil + } + out := new(BootImageSkewEnforcementConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootImageSkewEnforcementStatus) DeepCopyInto(out *BootImageSkewEnforcementStatus) { + *out = *in + out.Automatic = in.Automatic + out.Manual = in.Manual + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootImageSkewEnforcementStatus. +func (in *BootImageSkewEnforcementStatus) DeepCopy() *BootImageSkewEnforcementStatus { + if in == nil { + return nil + } + out := new(BootImageSkewEnforcementStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { *out = *in @@ -676,6 +711,38 @@ func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBootImageAutomatic) DeepCopyInto(out *ClusterBootImageAutomatic) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBootImageAutomatic. +func (in *ClusterBootImageAutomatic) DeepCopy() *ClusterBootImageAutomatic { + if in == nil { + return nil + } + out := new(ClusterBootImageAutomatic) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterBootImageManual) DeepCopyInto(out *ClusterBootImageManual) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBootImageManual. +func (in *ClusterBootImageManual) DeepCopy() *ClusterBootImageManual { + if in == nil { + return nil + } + out := new(ClusterBootImageManual) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { *out = *in @@ -2640,6 +2707,27 @@ func (in *InsightsReport) DeepCopy() *InsightsReport { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IrreconcilableValidationOverrides) DeepCopyInto(out *IrreconcilableValidationOverrides) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]IrreconcilableValidationOverridesStorage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IrreconcilableValidationOverrides. +func (in *IrreconcilableValidationOverrides) DeepCopy() *IrreconcilableValidationOverrides { + if in == nil { + return nil + } + out := new(IrreconcilableValidationOverrides) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { *out = *in @@ -3221,6 +3309,8 @@ func (in *MachineConfigurationSpec) DeepCopyInto(out *MachineConfigurationSpec) in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) in.ManagedBootImages.DeepCopyInto(&out.ManagedBootImages) in.NodeDisruptionPolicy.DeepCopyInto(&out.NodeDisruptionPolicy) + in.IrreconcilableValidationOverrides.DeepCopyInto(&out.IrreconcilableValidationOverrides) + out.BootImageSkewEnforcement = in.BootImageSkewEnforcement return } @@ -3246,6 +3336,7 @@ func (in *MachineConfigurationStatus) DeepCopyInto(out *MachineConfigurationStat } in.NodeDisruptionPolicyStatus.DeepCopyInto(&out.NodeDisruptionPolicyStatus) in.ManagedBootImagesStatus.DeepCopyInto(&out.ManagedBootImagesStatus) + out.BootImageSkewEnforcementStatus = in.BootImageSkewEnforcementStatus return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 5b2ca202f..e7c94e286 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -175,10 +175,7 @@ ingresscontrollers.operator.openshift.io: CRDName: ingresscontrollers.operator.openshift.io Capability: Ingress Category: "" - FeatureGates: - - IngressControllerLBSubnetsAWS - - SetEIPForNLBIngressController - - SetEIPForNLBIngressController+IngressControllerLBSubnetsAWS + FeatureGates: [] FilenameOperatorName: ingress FilenameOperatorOrdering: "00" FilenameRunLevel: "0000_50" @@ -220,7 +217,8 @@ kubeapiservers.operator.openshift.io: CRDName: kubeapiservers.operator.openshift.io Capability: "" Category: coreoperators - FeatureGates: [] + FeatureGates: + - EventTTL FilenameOperatorName: kube-apiserver FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_20" @@ -305,7 +303,10 @@ machineconfigurations.operator.openshift.io: Capability: "" Category: "" FeatureGates: + - BootImageSkewEnforcement + - IrreconcilableMachineConfig - ManagedBootImages + - ManagedBootImages+ManagedBootImagesCPMS FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 582f9686f..483d9720d 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -1314,6 +1314,14 @@ func (KubeAPIServerList) SwaggerDoc() map[string]string { return map_KubeAPIServerList } +var map_KubeAPIServerSpec = map[string]string{ + "eventTTLMinutes": "eventTTLMinutes specifies the amount of time that the events are stored before being deleted. The TTL is allowed between 5 minutes minimum up to a maximum of 180 minutes (3 hours).\n\nLowering this value will reduce the storage required in etcd. Note that this setting will only apply to new events being created and will not update existing events.\n\nWhen omitted this means no opinion, and the platform is left to choose a reasonable default, which is subject to change over time. 
The current default value is 3h (180 minutes).", +} + +func (KubeAPIServerSpec) SwaggerDoc() map[string]string { + return map_KubeAPIServerSpec +} + var map_KubeAPIServerStatus = map[string]string{ "serviceAccountIssuers": "serviceAccountIssuers tracks history of used service account issuers. The item without expiration time represents the currently used service account issuer. The other items represents service account issuers that were used previously and are still being trusted. The default expiration for the items is set by the platform and it defaults to 24h. see: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection", } @@ -1379,6 +1387,57 @@ func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { return map_KubeStorageVersionMigratorList } +var map_BootImageSkewEnforcementConfig = map[string]string{ + "": "BootImageSkewEnforcementConfig is used to configure how boot image version skew is enforced on the cluster.", + "mode": "mode determines the underlying behavior of skew enforcement mechanism. Valid values are Manual and None. Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP & RHCOS version associated with the last boot image update in the manual field. In Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the skew limit described by the release image. None means that the MCO will no longer monitor the boot image skew. This may affect the cluster's ability to scale. This field is required.", + "manual": "manual describes the current boot image of the cluster. This should be set to the oldest boot image used amongst all machine resources in the cluster. This must include either the RHCOS version of the boot image or the OCP release version which shipped with that RHCOS boot image. Required when mode is set to \"Manual\" and forbidden otherwise.", +} + +func (BootImageSkewEnforcementConfig) SwaggerDoc() map[string]string { + return map_BootImageSkewEnforcementConfig +} + +var map_BootImageSkewEnforcementStatus = map[string]string{ + "": "BootImageSkewEnforcementStatus is the type for the status object. It represents the cluster defaults when the boot image skew enforcement configuration is undefined and reflects the actual configuration when it is defined.", + "mode": "mode determines the underlying behavior of skew enforcement mechanism. Valid values are Automatic, Manual and None. Automatic means that the MCO will perform boot image updates and store the OCP & RHCOS version associated with the last boot image update in the automatic field. Manual means that the cluster admin is expected to perform manual boot image updates and store the OCP & RHCOS version associated with the last boot image update in the manual field. In Automatic and Manual mode, the MCO will prevent upgrades when the boot image skew exceeds the skew limit described by the release image. None means that the MCO will no longer monitor the boot image skew. This may affect the cluster's ability to scale. This field is required.", + "automatic": "automatic describes the current boot image of the cluster. This will be populated by the MCO when performing boot image updates. This value will be compared against the cluster's skew limit to determine skew compliance. Required when mode is set to \"Automatic\" and forbidden otherwise.", + "manual": "manual describes the current boot image of the cluster. 
This will be populated by the MCO using the values provided in the spec.bootImageSkewEnforcement.manual field. This value will be compared against the cluster's skew limit to determine skew compliance. Required when mode is set to \"Manual\" and forbidden otherwise.", +} + +func (BootImageSkewEnforcementStatus) SwaggerDoc() map[string]string { + return map_BootImageSkewEnforcementStatus +} + +var map_ClusterBootImageAutomatic = map[string]string{ + "": "ClusterBootImageAutomatic is used to describe the cluster boot image in Automatic mode. It stores the RHCOS version of the boot image and the OCP release version which shipped with that RHCOS boot image. At least one of these values are required. If ocpVersion and rhcosVersion are defined, both values will be used for checking skew compliance. If only ocpVersion is defined, only that value will be used for checking skew compliance. If only rhcosVersion is defined, only that value will be used for checking skew compliance.", + "ocpVersion": "ocpVersion provides a string which represents the OCP version of the boot image. This field must match the OCP semver compatible format of x.y.z. This field must be between 5 and 10 characters long.", + "rhcosVersion": "rhcosVersion provides a string which represents the RHCOS version of the boot image This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between 14 and 21 characters long.", +} + +func (ClusterBootImageAutomatic) SwaggerDoc() map[string]string { + return map_ClusterBootImageAutomatic +} + +var map_ClusterBootImageManual = map[string]string{ + "": "ClusterBootImageManual is used to describe the cluster boot image in Manual mode.", + "mode": "mode is used to configure which boot image field is defined in Manual mode. Valid values are OCPVersion and RHCOSVersion. OCPVersion means that the cluster admin is expected to set the OCP version associated with the last boot image update in the OCPVersion field. RHCOSVersion means that the cluster admin is expected to set the RHCOS version associated with the last boot image update in the RHCOSVersion field. This field is required.", + "ocpVersion": "ocpVersion provides a string which represents the OCP version of the boot image. This field must match the OCP semver compatible format of x.y.z. This field must be between 5 and 10 characters long. Required when mode is set to \"OCPVersion\" and forbidden otherwise.", + "rhcosVersion": "rhcosVersion provides a string which represents the RHCOS version of the boot image This field must match rhcosVersion formatting of [major].[minor].[datestamp(YYYYMMDD)]-[buildnumber] or the legacy format of [major].[minor].[timestamp(YYYYMMDDHHmm)]-[buildnumber]. This field must be between 14 and 21 characters long. Required when mode is set to \"RHCOSVersion\" and forbidden otherwise.", +} + +func (ClusterBootImageManual) SwaggerDoc() map[string]string { + return map_ClusterBootImageManual +} + +var map_IrreconcilableValidationOverrides = map[string]string{ + "": "IrreconcilableValidationOverrides holds the irreconcilable validations overrides to be applied on each rendered MachineConfig generation.", + "storage": "storage can be used to allow making irreconcilable changes to the selected sections under the `spec.config.storage` field of MachineConfig CRs It must have at least one item, may not exceed 3 items and must not contain duplicates. 
Allowed element values are \"Disks\", \"FileSystems\", \"Raid\" and omitted. When contains \"Disks\" changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. When contains \"FileSystems\" changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. When contains \"Raid\" changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. When omitted changes to the `spec.config.storage` section are forbidden.", +} + +func (IrreconcilableValidationOverrides) SwaggerDoc() map[string]string { + return map_IrreconcilableValidationOverrides +} + var map_MachineConfiguration = map[string]string{ "": "MachineConfiguration provides information to configure an operator to manage Machine Configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1401,8 +1460,10 @@ func (MachineConfigurationList) SwaggerDoc() map[string]string { } var map_MachineConfigurationSpec = map[string]string{ - "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", - "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.", + "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", + "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.", + "irreconcilableValidationOverrides": "irreconcilableValidationOverrides is an optional field that can used to make changes to a MachineConfig that cannot be applied to existing nodes. When specified, the fields configured with validation overrides will no longer reject changes to those respective fields due to them not being able to be applied to existing nodes. 
Only newly provisioned nodes will have these configurations applied. Existing nodes will report observed configuration differences in their MachineConfigNode status.", + "bootImageSkewEnforcement": "bootImageSkewEnforcement allows an admin to configure how boot image version skew is enforced on the cluster. When omitted, this will default to Automatic for clusters that support automatic boot image updates. For clusters that do not support automatic boot image updates, cluster upgrades will be disabled until a skew enforcement mode has been specified. When version skew is being enforced, cluster upgrades will be disabled until the version skew is deemed acceptable for the current release payload.", } func (MachineConfigurationSpec) SwaggerDoc() map[string]string { @@ -1410,10 +1471,11 @@ func (MachineConfigurationSpec) SwaggerDoc() map[string]string { } var map_MachineConfigurationStatus = map[string]string{ - "observedGeneration": "observedGeneration is the last generation change you've dealt with", - "conditions": "conditions is a list of conditions and their status", - "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", - "managedBootImagesStatus": "managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is and will be used by Machine Config Controller while performing boot image updates.", + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "nodeDisruptionPolicyStatus": "nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, and will be used by the Machine Config Daemon during future node updates.", + "managedBootImagesStatus": "managedBootImagesStatus reflects what the latest cluster-validated boot image configuration is and will be used by Machine Config Controller while performing boot image updates.", + "bootImageSkewEnforcementStatus": "bootImageSkewEnforcementStatus reflects what the latest cluster-validated boot image skew enforcement configuration is and will be used by Machine Config Controller while performing boot image skew enforcement. When omitted, the MCO has no knowledge of how to enforce boot image skew. When the MCO does not know how boot image skew should be enforced, cluster upgrades will be blocked until it can either automatically determine skew enforcement or there is an explicit skew enforcement configuration provided in the spec.bootImageSkewEnforcement field.", } func (MachineConfigurationStatus) SwaggerDoc() map[string]string { @@ -1422,7 +1484,7 @@ func (MachineConfigurationStatus) SwaggerDoc() map[string]string { var map_MachineManager = map[string]string{ "": "MachineManager describes a target machine resource that is registered for boot image updates. It stores identifying information such as the resource type and the API Group of the resource. It also provides granular control via the selection field.", - "resource": "resource is the machine management resource's type. The only current valid value is machinesets. machinesets means that the machine manager will only register resources of the kind MachineSet.", + "resource": "resource is the machine management resource's type. Valid values are machinesets and controlplanemachinesets. machinesets means that the machine manager will only register resources of the kind MachineSet. 
controlplanemachinesets means that the machine manager will only register resources of the kind ControlPlaneMachineSet.", "apiGroup": "apiGroup is name of the APIGroup that the machine management resource belongs to. The only current valid value is machine.openshift.io. machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group.", "selection": "selection allows granular control of the machine management resources that will be registered for boot image updates.", } @@ -1432,7 +1494,7 @@ func (MachineManager) SwaggerDoc() map[string]string { } var map_MachineManagerSelector = map[string]string{ - "mode": "mode determines how machine managers will be selected for updates. Valid values are All and Partial. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. None means that every resource matched by the machine manager will not be updated.", + "mode": "mode determines how machine managers will be selected for updates. Valid values are All, Partial and None. All means that every resource matched by the machine manager will be updated. Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. Partial is not permitted for the controlplanemachinesets resource type as they are a singleton within the cluster. None means that every resource matched by the machine manager will not be updated.", "partial": "partial provides label selector(s) that can be used to match machine management resources. Only permitted when mode is set to \"Partial\".", } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go index 0d2c3e4f8..df593a666 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/apiserver.go @@ -64,6 +64,7 @@ func extractAPIServer(aPIServer *configv1.APIServer, fieldManager string, subres b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b APIServerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *APIServerApplyConfiguration) WithStatus(value configv1.APIServerStatus) return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *APIServerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *APIServerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *APIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *APIServerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go index 6ae8497a5..39d260e54 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/authentication.go @@ -64,6 +64,7 @@ func extractAuthentication(authentication *configv1.Authentication, fieldManager b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b AuthenticationApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatu return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *AuthenticationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go index 5348a3c99..3d1a83d28 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go @@ -9,11 +9,12 @@ import ( // AzurePlatformStatusApplyConfiguration represents a declarative configuration of the AzurePlatformStatus type for use // with apply. 
type AzurePlatformStatusApplyConfiguration struct { - ResourceGroupName *string `json:"resourceGroupName,omitempty"` - NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` - CloudName *configv1.AzureCloudEnvironment `json:"cloudName,omitempty"` - ARMEndpoint *string `json:"armEndpoint,omitempty"` - ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` + CloudName *configv1.AzureCloudEnvironment `json:"cloudName,omitempty"` + ARMEndpoint *string `json:"armEndpoint,omitempty"` + ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` } // AzurePlatformStatusApplyConfiguration constructs a declarative configuration of the AzurePlatformStatus type for use with @@ -66,3 +67,11 @@ func (b *AzurePlatformStatusApplyConfiguration) WithResourceTags(values ...*Azur } return b } + +// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call. +func (b *AzurePlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *AzurePlatformStatusApplyConfiguration { + b.CloudLoadBalancerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go index cdadabcae..606505281 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/build.go @@ -63,6 +63,7 @@ func extractBuild(build *configv1.Build, fieldManager string, subresource string b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b BuildApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -230,8 +231,24 @@ func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) * return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *BuildApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *BuildApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *BuildApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
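For orientation, a minimal sketch of how a consumer of this vendored package might populate the new CloudLoadBalancerConfig field on AzurePlatformStatusApplyConfiguration. The CloudLoadBalancerConfig() constructor, its WithDNSType setter, and the configv1.ClusterHostedDNSType constant are assumptions here (they follow the same generated pattern but are not shown in this hunk).

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Build an AzurePlatformStatus apply configuration that carries the new
	// cloudLoadBalancerConfig field alongside the existing ones.
	// CloudLoadBalancerConfig() and WithDNSType are assumed to be generated
	// with the same shape as the builders in this patch.
	status := applyconfigv1.AzurePlatformStatus().
		WithResourceGroupName("my-resource-group").
		WithCloudName(configv1.AzurePublicCloud).
		WithCloudLoadBalancerConfig(
			applyconfigv1.CloudLoadBalancerConfig().
				WithDNSType(configv1.ClusterHostedDNSType),
		)

	fmt.Println(*status.ResourceGroupName, status.CloudLoadBalancerConfig != nil)
}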
+func (b *BuildApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go index 1ee4a91fb..eb722c572 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterimagepolicy.go @@ -64,6 +64,7 @@ func extractClusterImagePolicy(clusterImagePolicy *configv1.ClusterImagePolicy, b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ClusterImagePolicyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePol return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go index 4bfa43805..66f1d1988 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusteroperator.go @@ -64,6 +64,7 @@ func extractClusterOperator(clusterOperator *configv1.ClusterOperator, fieldMana b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ClusterOperatorApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterOperatorApplyConfiguration) WithStatus(value *ClusterOperatorSta return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ClusterOperatorApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterOperatorApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterOperatorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterOperatorApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go index 69073ee5c..b85a770ed 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clusterversion.go @@ -64,6 +64,7 @@ func extractClusterVersion(clusterVersion *configv1.ClusterVersion, fieldManager b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ClusterVersionApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterVersionApplyConfiguration) WithStatus(value *ClusterVersionStatu return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ClusterVersionApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterVersionApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterVersionApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterVersionApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go index 8e04091da..e4d496e1a 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/console.go @@ -64,6 +64,7 @@ func extractConsole(console *configv1.Console, fieldManager string, subresource b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ConsoleApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
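The GetKind/GetAPIVersion/GetName/GetNamespace accessors (and the IsApplyConfiguration marker method) added throughout these files let callers handle the different apply configurations uniformly. A small sketch using only the methods introduced in this patch; the objectIdentity interface below is a local, hypothetical helper, not part of client-go, and cluster-scoped config resources are conventionally named "cluster".

package main

import (
	"fmt"

	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

// objectIdentity is a hypothetical local interface that the generated apply
// configurations in this patch now satisfy via the new Get* accessors.
type objectIdentity interface {
	GetKind() *string
	GetAPIVersion() *string
	GetName() *string
	GetNamespace() *string
}

func describe(obj objectIdentity) string {
	// The Get* methods return pointers; fields that were never set come back nil.
	return fmt.Sprintf("%s/%s name=%s namespace=%s",
		deref(obj.GetAPIVersion()), deref(obj.GetKind()), deref(obj.GetName()), deref(obj.GetNamespace()))
}

func deref(s *string) string {
	if s == nil {
		return "<unset>"
	}
	return *s
}

func main() {
	fmt.Println(describe(applyconfigv1.APIServer("cluster")))
	fmt.Println(describe(applyconfigv1.Console("cluster")))
}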
+func (b *ConsoleApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConsoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go new file mode 100644 index 000000000..77234d0df --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/custom.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// CustomApplyConfiguration represents a declarative configuration of the Custom type for use +// with apply. +type CustomApplyConfiguration struct { + Configs []GathererConfigApplyConfiguration `json:"configs,omitempty"` +} + +// CustomApplyConfiguration constructs a declarative configuration of the Custom type for use with +// apply. +func Custom() *CustomApplyConfiguration { + return &CustomApplyConfiguration{} +} + +// WithConfigs adds the given value to the Configs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Configs field. +func (b *CustomApplyConfiguration) WithConfigs(values ...*GathererConfigApplyConfiguration) *CustomApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfigs") + } + b.Configs = append(b.Configs, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go index 4ca934c96..2ff9dc857 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dns.go @@ -64,6 +64,7 @@ func extractDNS(dNS *configv1.DNS, fieldManager string, subresource string) (*DN b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b DNSApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *DNSApplyConfiguration) WithStatus(value configv1.DNSStatus) *DNSApplyCo return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *DNSApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *DNSApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *DNSApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *DNSApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go index 73ec53314..2ec8b3af4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go @@ -64,6 +64,7 @@ func extractFeatureGate(featureGate *configv1.FeatureGate, fieldManager string, b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b FeatureGateApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApply return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *FeatureGateApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *FeatureGateApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *FeatureGateApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *FeatureGateApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go new file mode 100644 index 000000000..eaa796519 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherconfig.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GatherConfigApplyConfiguration represents a declarative configuration of the GatherConfig type for use +// with apply. +type GatherConfigApplyConfiguration struct { + DataPolicy []configv1.DataPolicyOption `json:"dataPolicy,omitempty"` + Gatherers *GatherersApplyConfiguration `json:"gatherers,omitempty"` + Storage *StorageApplyConfiguration `json:"storage,omitempty"` +} + +// GatherConfigApplyConfiguration constructs a declarative configuration of the GatherConfig type for use with +// apply. +func GatherConfig() *GatherConfigApplyConfiguration { + return &GatherConfigApplyConfiguration{} +} + +// WithDataPolicy adds the given value to the DataPolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the DataPolicy field. +func (b *GatherConfigApplyConfiguration) WithDataPolicy(values ...configv1.DataPolicyOption) *GatherConfigApplyConfiguration { + for i := range values { + b.DataPolicy = append(b.DataPolicy, values[i]) + } + return b +} + +// WithGatherers sets the Gatherers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Gatherers field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithGatherers(value *GatherersApplyConfiguration) *GatherConfigApplyConfiguration { + b.Gatherers = value + return b +} + +// WithStorage sets the Storage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Storage field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithStorage(value *StorageApplyConfiguration) *GatherConfigApplyConfiguration { + b.Storage = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go new file mode 100644 index 000000000..caa8b79d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gathererconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GathererConfigApplyConfiguration represents a declarative configuration of the GathererConfig type for use +// with apply. +type GathererConfigApplyConfiguration struct { + Name *string `json:"name,omitempty"` + State *configv1.GathererState `json:"state,omitempty"` +} + +// GathererConfigApplyConfiguration constructs a declarative configuration of the GathererConfig type for use with +// apply. +func GathererConfig() *GathererConfigApplyConfiguration { + return &GathererConfigApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GathererConfigApplyConfiguration) WithName(value string) *GathererConfigApplyConfiguration { + b.Name = &value + return b +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *GathererConfigApplyConfiguration) WithState(value configv1.GathererState) *GathererConfigApplyConfiguration { + b.State = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go new file mode 100644 index 000000000..32469f512 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/gatherers.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// GatherersApplyConfiguration represents a declarative configuration of the Gatherers type for use +// with apply. +type GatherersApplyConfiguration struct { + Mode *configv1.GatheringMode `json:"mode,omitempty"` + Custom *CustomApplyConfiguration `json:"custom,omitempty"` +} + +// GatherersApplyConfiguration constructs a declarative configuration of the Gatherers type for use with +// apply. +func Gatherers() *GatherersApplyConfiguration { + return &GatherersApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *GatherersApplyConfiguration) WithMode(value configv1.GatheringMode) *GatherersApplyConfiguration { + b.Mode = &value + return b +} + +// WithCustom sets the Custom field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Custom field is set to the value of the last call. +func (b *GatherersApplyConfiguration) WithCustom(value *CustomApplyConfiguration) *GatherersApplyConfiguration { + b.Custom = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go index 63009029e..666ef86eb 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/image.go @@ -64,6 +64,7 @@ func extractImage(image *configv1.Image, fieldManager string, subresource string b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ImageApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ImageApplyConfiguration) WithStatus(value *ImageStatusApplyConfiguratio return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ImageApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ImageApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
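The new Custom, GathererConfig and Gatherers builders compose as in the following sketch. The gatherer name "clusterconfig" and the literal "Custom" and "Disabled" values are illustrative; the real enum constants live in github.com/openshift/api/config/v1 and are not shown in this hunk.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Compose a custom gatherer configuration from the new builders.
	gatherers := applyconfigv1.Gatherers().
		WithMode(configv1.GatheringMode("Custom")). // illustrative mode value
		WithCustom(applyconfigv1.Custom().
			WithConfigs(
				applyconfigv1.GathererConfig().
					WithName("clusterconfig").
					WithState(configv1.GathererState("Disabled")), // illustrative state value
			))

	fmt.Println(*gatherers.Mode, len(gatherers.Custom.Configs))
}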
+func (b *ImageApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go index 6d47fac03..4235d2f51 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagecontentpolicy.go @@ -63,6 +63,7 @@ func extractImageContentPolicy(imageContentPolicy *configv1.ImageContentPolicy, b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ImageContentPolicyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -230,8 +231,24 @@ func (b *ImageContentPolicyApplyConfiguration) WithSpec(value *ImageContentPolic return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ImageContentPolicyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ImageContentPolicyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageContentPolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ImageContentPolicyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go index f3c5ca21a..1e4bb2857 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagedigestmirrorset.go @@ -64,6 +64,7 @@ func extractImageDigestMirrorSet(imageDigestMirrorSet *configv1.ImageDigestMirro b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ImageDigestMirrorSetApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ImageDigestMirrorSetApplyConfiguration) WithStatus(value configv1.Image return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ImageDigestMirrorSetApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *ImageDigestMirrorSetApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageDigestMirrorSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ImageDigestMirrorSetApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go index 6ccc3746a..6ae64c679 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagepolicy.go @@ -66,6 +66,7 @@ func extractImagePolicy(imagePolicy *configv1.ImagePolicy, fieldManager string, b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ImagePolicyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -241,8 +242,24 @@ func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApply return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go index b8a9de192..3a7328112 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/imagetagmirrorset.go @@ -64,6 +64,7 @@ func extractImageTagMirrorSet(imageTagMirrorSet *configv1.ImageTagMirrorSet, fie b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ImageTagMirrorSetApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ImageTagMirrorSetApplyConfiguration) WithStatus(value configv1.ImageTag return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *ImageTagMirrorSetApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ImageTagMirrorSetApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImageTagMirrorSetApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ImageTagMirrorSetApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go index 970391cfa..b98a22948 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructure.go @@ -64,6 +64,7 @@ func extractInfrastructure(infrastructure *configv1.Infrastructure, fieldManager b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b InfrastructureApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *InfrastructureApplyConfiguration) WithStatus(value *InfrastructureStatu return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InfrastructureApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InfrastructureApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *InfrastructureApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InfrastructureApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go index 945bacf8a..b1680f352 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingress.go @@ -64,6 +64,7 @@ func extractIngress(ingress *configv1.Ingress, fieldManager string, subresource b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b IngressApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
@@ -239,8 +240,24 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *IngressApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *IngressApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *IngressApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go new file mode 100644 index 000000000..829a4071a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagather.go @@ -0,0 +1,254 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsDataGatherApplyConfiguration represents a declarative configuration of the InsightsDataGather type for use +// with apply. +type InsightsDataGatherApplyConfiguration struct { + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"` +} + +// InsightsDataGather constructs a declarative configuration of the InsightsDataGather type for use with +// apply. +func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration { + b := &InsightsDataGatherApplyConfiguration{} + b.WithName(name) + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from +// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, a +// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// insightsDataGather must be a unmodified InsightsDataGather API object that was retrieved from the Kubernetes API. +// ExtractInsightsDataGather provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. 
+// Experimental! +func ExtractInsightsDataGather(insightsDataGather *configv1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "") +} + +// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractInsightsDataGatherStatus(insightsDataGather *configv1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) { + return extractInsightsDataGather(insightsDataGather, fieldManager, "status") +} + +func extractInsightsDataGather(insightsDataGather *configv1.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) { + b := &InsightsDataGatherApplyConfiguration{} + err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1.InsightsDataGather"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(insightsDataGather.Name) + + b.WithKind("InsightsDataGather") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} +func (b InsightsDataGatherApplyConfiguration) IsApplyConfiguration() {} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
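Putting the pieces together, a sketch of a complete InsightsDataGather apply configuration. The constructor already stamps Name, Kind and APIVersion as shown above; the "ObfuscateNetworking" and "All" literals, the label, and the singleton name "cluster" are assumptions for illustration.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	// Spec fields are layered on with the chained With* builders from this patch.
	idg := applyconfigv1.InsightsDataGather("cluster").
		WithLabels(map[string]string{"team": "insights"}).
		WithSpec(applyconfigv1.InsightsDataGatherSpec().
			WithGatherConfig(applyconfigv1.GatherConfig().
				// "ObfuscateNetworking" is an illustrative DataPolicyOption value.
				WithDataPolicy(configv1.DataPolicyOption("ObfuscateNetworking")).
				WithGatherers(applyconfigv1.Gatherers().
					WithMode(configv1.GatheringMode("All"))))) // illustrative mode value

	fmt.Println(*idg.GetKind(), *idg.GetAPIVersion(), *idg.GetName())
}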
+func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.Spec = value + return b +} + +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go new file mode 100644 index 000000000..4be6d441a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/insightsdatagatherspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// InsightsDataGatherSpecApplyConfiguration represents a declarative configuration of the InsightsDataGatherSpec type for use +// with apply. +type InsightsDataGatherSpecApplyConfiguration struct { + GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"` +} + +// InsightsDataGatherSpecApplyConfiguration constructs a declarative configuration of the InsightsDataGatherSpec type for use with +// apply. +func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration { + return &InsightsDataGatherSpecApplyConfiguration{} +} + +// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherConfig field is set to the value of the last call. 
+func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration { + b.GatherConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go index 195594ead..3502e6954 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/network.go @@ -64,6 +64,7 @@ func extractNetwork(network *configv1.Network, fieldManager string, subresource b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b NetworkApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go index 611705623..c66357229 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/node.go @@ -64,6 +64,7 @@ func extractNode(node *configv1.Node, fieldManager string, subresource string) ( b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b NodeApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *NodeApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *NodeApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. 
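The ExtractInsightsDataGather helpers above enable the usual extract/modify-in-place/apply flow for server-side apply. A sketch under stated assumptions: the field-manager name and the stand-in "existing" object are illustrative, the data-policy literal is illustrative, and the final Apply call through a typed client is omitted because it is not part of this excerpt.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

const fieldManager = "insights-operator" // illustrative field-manager name

// reconcileGatherConfig extracts the fields currently owned by fieldManager,
// mutates them in place, and returns the apply configuration that would then
// be sent back via a typed client's Apply call (client plumbing omitted).
func reconcileGatherConfig(current *configv1.InsightsDataGather) (*applyconfigv1.InsightsDataGatherApplyConfiguration, error) {
	ac, err := applyconfigv1.ExtractInsightsDataGather(current, fieldManager)
	if err != nil {
		return nil, err
	}
	// Modify only the portion of the spec this manager owns.
	ac.WithSpec(applyconfigv1.InsightsDataGatherSpec().
		WithGatherConfig(applyconfigv1.GatherConfig().
			WithDataPolicy(configv1.DataPolicyOption("ObfuscateNetworking"))))
	return ac, nil
}

func main() {
	// A stand-in for an object retrieved from the cluster.
	existing := &configv1.InsightsDataGather{}
	existing.Name = "cluster"

	ac, err := reconcileGatherConfig(existing)
	if err != nil {
		panic(err)
	}
	fmt.Println(*ac.GetName())
}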
func (b *NodeApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *NodeApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go index 1c9589c08..37725fb7c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/oauth.go @@ -64,6 +64,7 @@ func extractOAuth(oAuth *configv1.OAuth, fieldManager string, subresource string b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b OAuthApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *OAuthApplyConfiguration) WithStatus(value configv1.OAuthStatus) *OAuthA return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *OAuthApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *OAuthApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *OAuthApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OAuthApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go index df95eb84d..0dbba79c4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/operatorhub.go @@ -64,6 +64,7 @@ func extractOperatorHub(operatorHub *configv1.OperatorHub, fieldManager string, b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b OperatorHubApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *OperatorHubApplyConfiguration) WithStatus(value *OperatorHubStatusApply return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *OperatorHubApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *OperatorHubApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *OperatorHubApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OperatorHubApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go new file mode 100644 index 000000000..49daf4bc2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeclaimreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PersistentVolumeClaimReferenceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimReference type for use +// with apply. +type PersistentVolumeClaimReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PersistentVolumeClaimReferenceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimReference type for use with +// apply. +func PersistentVolumeClaimReference() *PersistentVolumeClaimReferenceApplyConfiguration { + return &PersistentVolumeClaimReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PersistentVolumeClaimReferenceApplyConfiguration) WithName(value string) *PersistentVolumeClaimReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go new file mode 100644 index 000000000..c62fdbcf9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/persistentvolumeconfig.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PersistentVolumeConfigApplyConfiguration represents a declarative configuration of the PersistentVolumeConfig type for use +// with apply. +type PersistentVolumeConfigApplyConfiguration struct { + Claim *PersistentVolumeClaimReferenceApplyConfiguration `json:"claim,omitempty"` + MountPath *string `json:"mountPath,omitempty"` +} + +// PersistentVolumeConfigApplyConfiguration constructs a declarative configuration of the PersistentVolumeConfig type for use with +// apply. +func PersistentVolumeConfig() *PersistentVolumeConfigApplyConfiguration { + return &PersistentVolumeConfigApplyConfiguration{} +} + +// WithClaim sets the Claim field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Claim field is set to the value of the last call. 
+func (b *PersistentVolumeConfigApplyConfiguration) WithClaim(value *PersistentVolumeClaimReferenceApplyConfiguration) *PersistentVolumeConfigApplyConfiguration { + b.Claim = value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *PersistentVolumeConfigApplyConfiguration) WithMountPath(value string) *PersistentVolumeConfigApplyConfiguration { + b.MountPath = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go index 5c040bae4..e9b7c2c6b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/project.go @@ -64,6 +64,7 @@ func extractProject(project *configv1.Project, fieldManager string, subresource b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ProjectApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ProjectApplyConfiguration) WithStatus(value configv1.ProjectStatus) *Pr return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ProjectApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ProjectApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ProjectApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ProjectApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go index 7184cbd08..7992e28f2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/proxy.go @@ -64,6 +64,7 @@ func extractProxy(proxy *configv1.Proxy, fieldManager string, subresource string b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b ProxyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ProxyApplyConfiguration) WithStatus(value *ProxyStatusApplyConfiguratio return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *ProxyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ProxyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ProxyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ProxyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go index fa2323d72..2f04f83ed 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/scheduler.go @@ -64,6 +64,7 @@ func extractScheduler(scheduler *configv1.Scheduler, fieldManager string, subres b.WithAPIVersion("config.openshift.io/v1") return b, nil } +func (b SchedulerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *SchedulerApplyConfiguration) WithStatus(value configv1.SchedulerStatus) return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *SchedulerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *SchedulerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *SchedulerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *SchedulerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go new file mode 100644 index 000000000..405df6c13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/storage.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// StorageApplyConfiguration represents a declarative configuration of the Storage type for use +// with apply. +type StorageApplyConfiguration struct { + Type *configv1.StorageType `json:"type,omitempty"` + PersistentVolume *PersistentVolumeConfigApplyConfiguration `json:"persistentVolume,omitempty"` +} + +// StorageApplyConfiguration constructs a declarative configuration of the Storage type for use with +// apply. 
+func Storage() *StorageApplyConfiguration { + return &StorageApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithType(value configv1.StorageType) *StorageApplyConfiguration { + b.Type = &value + return b +} + +// WithPersistentVolume sets the PersistentVolume field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PersistentVolume field is set to the value of the last call. +func (b *StorageApplyConfiguration) WithPersistentVolume(value *PersistentVolumeConfigApplyConfiguration) *StorageApplyConfiguration { + b.PersistentVolume = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagerconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagerconfig.go new file mode 100644 index 000000000..44b5aa40b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagerconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// AlertmanagerConfigApplyConfiguration represents a declarative configuration of the AlertmanagerConfig type for use +// with apply. +type AlertmanagerConfigApplyConfiguration struct { + DeploymentMode *configv1alpha1.AlertManagerDeployMode `json:"deploymentMode,omitempty"` + CustomConfig *AlertmanagerCustomConfigApplyConfiguration `json:"customConfig,omitempty"` +} + +// AlertmanagerConfigApplyConfiguration constructs a declarative configuration of the AlertmanagerConfig type for use with +// apply. +func AlertmanagerConfig() *AlertmanagerConfigApplyConfiguration { + return &AlertmanagerConfigApplyConfiguration{} +} + +// WithDeploymentMode sets the DeploymentMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeploymentMode field is set to the value of the last call. +func (b *AlertmanagerConfigApplyConfiguration) WithDeploymentMode(value configv1alpha1.AlertManagerDeployMode) *AlertmanagerConfigApplyConfiguration { + b.DeploymentMode = &value + return b +} + +// WithCustomConfig sets the CustomConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CustomConfig field is set to the value of the last call. 
+func (b *AlertmanagerConfigApplyConfiguration) WithCustomConfig(value *AlertmanagerCustomConfigApplyConfiguration) *AlertmanagerConfigApplyConfiguration { + b.CustomConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go new file mode 100644 index 000000000..c22f3232b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go @@ -0,0 +1,99 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/api/core/v1" +) + +// AlertmanagerCustomConfigApplyConfiguration represents a declarative configuration of the AlertmanagerCustomConfig type for use +// with apply. +type AlertmanagerCustomConfigApplyConfiguration struct { + LogLevel *configv1alpha1.LogLevel `json:"logLevel,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + Secrets []configv1alpha1.SecretName `json:"secrets,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` +} + +// AlertmanagerCustomConfigApplyConfiguration constructs a declarative configuration of the AlertmanagerCustomConfig type for use with +// apply. +func AlertmanagerCustomConfig() *AlertmanagerCustomConfigApplyConfiguration { + return &AlertmanagerCustomConfigApplyConfiguration{} +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithLogLevel(value configv1alpha1.LogLevel) *AlertmanagerCustomConfigApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *AlertmanagerCustomConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
+func (b *AlertmanagerCustomConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *AlertmanagerCustomConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithSecrets(values ...configv1alpha1.SecretName) *AlertmanagerCustomConfigApplyConfiguration { + for i := range values { + b.Secrets = append(b.Secrets, values[i]) + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *AlertmanagerCustomConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithTopologySpreadConstraints(values ...v1.TopologySpreadConstraint) *AlertmanagerCustomConfigApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} + +// WithVolumeClaimTemplate sets the VolumeClaimTemplate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VolumeClaimTemplate field is set to the value of the last call. +func (b *AlertmanagerCustomConfigApplyConfiguration) WithVolumeClaimTemplate(value v1.PersistentVolumeClaim) *AlertmanagerCustomConfigApplyConfiguration { + b.VolumeClaimTemplate = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/audit.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/audit.go new file mode 100644 index 000000000..9caf3a038 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/audit.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// AuditApplyConfiguration represents a declarative configuration of the Audit type for use +// with apply. +type AuditApplyConfiguration struct { + Profile *configv1alpha1.AuditProfile `json:"profile,omitempty"` +} + +// AuditApplyConfiguration constructs a declarative configuration of the Audit type for use with +// apply. 
+func Audit() *AuditApplyConfiguration { + return &AuditApplyConfiguration{} +} + +// WithProfile sets the Profile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Profile field is set to the value of the last call. +func (b *AuditApplyConfiguration) WithProfile(value configv1alpha1.AuditProfile) *AuditApplyConfiguration { + b.Profile = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go index b4982de15..b9a92ae68 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/backup.go @@ -64,6 +64,7 @@ func extractBackup(backup *configv1alpha1.Backup, fieldManager string, subresour b.WithAPIVersion("config.openshift.io/v1alpha1") return b, nil } +func (b BackupApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *BackupApplyConfiguration) WithStatus(value configv1alpha1.BackupStatus) return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *BackupApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *BackupApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *BackupApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *BackupApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go index f3d7fdb77..36b6250b4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clusterimagepolicy.go @@ -64,6 +64,7 @@ func extractClusterImagePolicy(clusterImagePolicy *configv1alpha1.ClusterImagePo b.WithAPIVersion("config.openshift.io/v1alpha1") return b, nil } +func (b ClusterImagePolicyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterImagePolicyApplyConfiguration) WithStatus(value *ClusterImagePol return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *ClusterImagePolicyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterImagePolicyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go index b935706eb..c788283cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoring.go @@ -64,6 +64,7 @@ func extractClusterMonitoring(clusterMonitoring *configv1alpha1.ClusterMonitorin b.WithAPIVersion("config.openshift.io/v1alpha1") return b, nil } +func (b ClusterMonitoringApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterMonitoringApplyConfiguration) WithStatus(value configv1alpha1.Cl return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ClusterMonitoringApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterMonitoringApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ClusterMonitoringApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterMonitoringApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go index 28fa2d7c3..7fcce84b5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go @@ -5,7 +5,9 @@ package v1alpha1 // ClusterMonitoringSpecApplyConfiguration represents a declarative configuration of the ClusterMonitoringSpec type for use // with apply. 
type ClusterMonitoringSpecApplyConfiguration struct { - UserDefined *UserDefinedMonitoringApplyConfiguration `json:"userDefined,omitempty"` + UserDefined *UserDefinedMonitoringApplyConfiguration `json:"userDefined,omitempty"` + AlertmanagerConfig *AlertmanagerConfigApplyConfiguration `json:"alertmanagerConfig,omitempty"` + MetricsServerConfig *MetricsServerConfigApplyConfiguration `json:"metricsServerConfig,omitempty"` } // ClusterMonitoringSpecApplyConfiguration constructs a declarative configuration of the ClusterMonitoringSpec type for use with @@ -21,3 +23,19 @@ func (b *ClusterMonitoringSpecApplyConfiguration) WithUserDefined(value *UserDef b.UserDefined = value return b } + +// WithAlertmanagerConfig sets the AlertmanagerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AlertmanagerConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithAlertmanagerConfig(value *AlertmanagerConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.AlertmanagerConfig = value + return b +} + +// WithMetricsServerConfig sets the MetricsServerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MetricsServerConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithMetricsServerConfig(value *MetricsServerConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.MetricsServerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go new file mode 100644 index 000000000..b1f3ac898 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// ContainerResourceApplyConfiguration represents a declarative configuration of the ContainerResource type for use +// with apply. +type ContainerResourceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Request *resource.Quantity `json:"request,omitempty"` + Limit *resource.Quantity `json:"limit,omitempty"` +} + +// ContainerResourceApplyConfiguration constructs a declarative configuration of the ContainerResource type for use with +// apply. +func ContainerResource() *ContainerResourceApplyConfiguration { + return &ContainerResourceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ContainerResourceApplyConfiguration) WithName(value string) *ContainerResourceApplyConfiguration { + b.Name = &value + return b +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Request field is set to the value of the last call. +func (b *ContainerResourceApplyConfiguration) WithRequest(value resource.Quantity) *ContainerResourceApplyConfiguration { + b.Request = &value + return b +} + +// WithLimit sets the Limit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limit field is set to the value of the last call. +func (b *ContainerResourceApplyConfiguration) WithLimit(value resource.Quantity) *ContainerResourceApplyConfiguration { + b.Limit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go index 6595aa782..0a8fcee74 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/imagepolicy.go @@ -66,6 +66,7 @@ func extractImagePolicy(imagePolicy *configv1alpha1.ImagePolicy, fieldManager st b.WithAPIVersion("config.openshift.io/v1alpha1") return b, nil } +func (b ImagePolicyApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -241,8 +242,24 @@ func (b *ImagePolicyApplyConfiguration) WithStatus(value *ImagePolicyStatusApply return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ImagePolicyApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ImagePolicyApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go index cf4ae1f00..f96ab5101 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go @@ -64,6 +64,7 @@ func extractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGa b.WithAPIVersion("config.openshift.io/v1alpha1") return b, nil } +func (b InsightsDataGatherApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
@@ -239,8 +240,24 @@ func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha1.I return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *InsightsDataGatherApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go new file mode 100644 index 000000000..428b7a065 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go @@ -0,0 +1,88 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/api/core/v1" +) + +// MetricsServerConfigApplyConfiguration represents a declarative configuration of the MetricsServerConfig type for use +// with apply. +type MetricsServerConfigApplyConfiguration struct { + Audit *AuditApplyConfiguration `json:"audit,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Verbosity *configv1alpha1.VerbosityLevel `json:"verbosity,omitempty"` + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// MetricsServerConfigApplyConfiguration constructs a declarative configuration of the MetricsServerConfig type for use with +// apply. +func MetricsServerConfig() *MetricsServerConfigApplyConfiguration { + return &MetricsServerConfigApplyConfiguration{} +} + +// WithAudit sets the Audit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Audit field is set to the value of the last call. +func (b *MetricsServerConfigApplyConfiguration) WithAudit(value *AuditApplyConfiguration) *MetricsServerConfigApplyConfiguration { + b.Audit = value + return b +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. 
+func (b *MetricsServerConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *MetricsServerConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *MetricsServerConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *MetricsServerConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithVerbosity sets the Verbosity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Verbosity field is set to the value of the last call. +func (b *MetricsServerConfigApplyConfiguration) WithVerbosity(value configv1alpha1.VerbosityLevel) *MetricsServerConfigApplyConfiguration { + b.Verbosity = &value + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *MetricsServerConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *MetricsServerConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. +func (b *MetricsServerConfigApplyConfiguration) WithTopologySpreadConstraints(values ...v1.TopologySpreadConstraint) *MetricsServerConfigApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go index f0c9797c5..6f20059cf 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha2/insightsdatagather.go @@ -64,6 +64,7 @@ func extractInsightsDataGather(insightsDataGather *configv1alpha2.InsightsDataGa b.WithAPIVersion("config.openshift.io/v1alpha2") return b, nil } +func (b InsightsDataGatherApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
@@ -239,8 +240,24 @@ func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha2.I return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *InsightsDataGatherApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InsightsDataGatherApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 439a5df05..de325e367 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -6,7 +6,7 @@ import ( fmt "fmt" sync "sync" - typed "sigs.k8s.io/structured-merge-diff/v4/typed" + typed "sigs.k8s.io/structured-merge-diff/v6/typed" ) func Parser() *typed.Parser { @@ -355,6 +355,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: armEndpoint type: scalar: string + - name: cloudLoadBalancerConfig + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + default: + dnsType: PlatformDefault - name: cloudName type: scalar: string @@ -1049,6 +1054,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.Custom + map: + fields: + - name: configs + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.GathererConfig + elementRelationship: associative + keys: + - name - name: com.github.openshift.api.config.v1.CustomFeatureGates map: fields: @@ -1415,6 +1431,47 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.GatherConfig + map: + fields: + - name: dataPolicy + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: gatherers + type: + namedType: com.github.openshift.api.config.v1.Gatherers + default: {} + - name: storage + type: + namedType: com.github.openshift.api.config.v1.Storage + default: {} +- name: com.github.openshift.api.config.v1.GathererConfig + map: + fields: + - name: name + type: + scalar: string + - name: state + type: + scalar: string +- name: com.github.openshift.api.config.v1.Gatherers + map: + fields: + - name: custom + type: + namedType: com.github.openshift.api.config.v1.Custom + default: {} + - name: mode + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: custom + discriminatorValue: Custom - name: com.github.openshift.api.config.v1.GitHubIdentityProvider map: fields: @@ -2007,6 +2064,30 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.InsightsDataGather + map: + fields: + - name: apiVersion + type: + 
scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.InsightsDataGatherSpec + default: {} +- name: com.github.openshift.api.config.v1.InsightsDataGatherSpec + map: + fields: + - name: gatherConfig + type: + namedType: com.github.openshift.api.config.v1.GatherConfig + default: {} - name: com.github.openshift.api.config.v1.IntermediateTLSProfile map: elementType: @@ -2927,6 +3008,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostname type: scalar: string +- name: com.github.openshift.api.config.v1.PersistentVolumeClaimReference + map: + fields: + - name: name + type: + scalar: string +- name: com.github.openshift.api.config.v1.PersistentVolumeConfig + map: + fields: + - name: claim + type: + namedType: com.github.openshift.api.config.v1.PersistentVolumeClaimReference + default: {} + - name: mountPath + type: + scalar: string - name: com.github.openshift.api.config.v1.PlatformSpec map: fields: @@ -3508,6 +3605,21 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.Storage + map: + fields: + - name: persistentVolume + type: + namedType: com.github.openshift.api.config.v1.PersistentVolumeConfig + default: {} + - name: type + type: + scalar: string + unions: + - discriminator: type + fields: + - fieldName: persistentVolume + discriminatorValue: PersistentVolume - name: com.github.openshift.api.config.v1.TLSSecurityProfile map: fields: @@ -3934,6 +4046,65 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.SecretNameReference default: {} +- name: com.github.openshift.api.config.v1alpha1.AlertmanagerConfig + map: + fields: + - name: customConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.AlertmanagerCustomConfig + default: {} + - name: deploymentMode + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.AlertmanagerCustomConfig + map: + fields: + - name: logLevel + type: + scalar: string + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: secrets + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: io.k8s.api.core.v1.TopologySpreadConstraint + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable + - name: volumeClaimTemplate + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaim +- name: com.github.openshift.api.config.v1alpha1.Audit + map: + fields: + - name: profile + type: + scalar: string - name: com.github.openshift.api.config.v1alpha1.Backup map: fields: @@ -4043,6 +4214,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1alpha1.ClusterMonitoringSpec map: fields: + - name: alertmanagerConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.AlertmanagerConfig + default: {} + - name: metricsServerConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.MetricsServerConfig + default: {} - name: userDefined type: namedType: 
com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring @@ -4059,6 +4238,18 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.config.v1alpha1.ContainerResource + map: + fields: + - name: limit + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: name + type: + scalar: string + - name: request + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: com.github.openshift.api.config.v1alpha1.EtcdBackupSpec map: fields: @@ -4191,6 +4382,44 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.config.v1alpha1.MetricsServerConfig + map: + fields: + - name: audit + type: + namedType: com.github.openshift.api.config.v1alpha1.Audit + default: {} + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: tolerations + type: + list: + elementType: + namedType: io.k8s.api.core.v1.Toleration + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: io.k8s.api.core.v1.TopologySpreadConstraint + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable + - name: verbosity + type: + scalar: string - name: com.github.openshift.api.config.v1alpha1.PKI map: fields: @@ -4527,12 +4756,45 @@ var schemaYAML = typed.YAMLObject(`types: - name: fieldRef type: namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: fileKeyRef + type: + namedType: io.k8s.api.core.v1.FileKeySelector - name: resourceFieldRef type: namedType: io.k8s.api.core.v1.ResourceFieldSelector - name: secretKeyRef type: namedType: io.k8s.api.core.v1.SecretKeySelector +- name: io.k8s.api.core.v1.FileKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + default: false + - name: path + type: + scalar: string + default: "" + - name: volumeName + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ModifyVolumeStatus + map: + fields: + - name: status + type: + scalar: string + default: "" + - name: targetVolumeAttributesClassName + type: + scalar: string - name: io.k8s.api.core.v1.ObjectFieldSelector map: fields: @@ -4544,6 +4806,126 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.PersistentVolumeClaim + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimSpec + default: {} + - name: status + type: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimStatus + default: {} +- name: io.k8s.api.core.v1.PersistentVolumeClaimCondition + map: + fields: + - name: lastProbeTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: 
io.k8s.api.core.v1.PersistentVolumeClaimSpec + map: + fields: + - name: accessModes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: dataSource + type: + namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: dataSourceRef + type: + namedType: io.k8s.api.core.v1.TypedObjectReference + - name: resources + type: + namedType: io.k8s.api.core.v1.VolumeResourceRequirements + default: {} + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: storageClassName + type: + scalar: string + - name: volumeAttributesClassName + type: + scalar: string + - name: volumeMode + type: + scalar: string + - name: volumeName + type: + scalar: string +- name: io.k8s.api.core.v1.PersistentVolumeClaimStatus + map: + fields: + - name: accessModes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allocatedResourceStatuses + type: + map: + elementType: + scalar: string + elementRelationship: separable + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: capacity + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PersistentVolumeClaimCondition + elementRelationship: associative + keys: + - type + - name: currentVolumeAttributesClassName + type: + scalar: string + - name: modifyVolumeStatus + type: + namedType: io.k8s.api.core.v1.ModifyVolumeStatus + - name: phase + type: + scalar: string - name: io.k8s.api.core.v1.ResourceClaim map: fields: @@ -4622,6 +5004,84 @@ var schemaYAML = typed.YAMLObject(`types: - name: value type: scalar: string +- name: io.k8s.api.core.v1.TopologySpreadConstraint + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: maxSkew + type: + scalar: numeric + default: 0 + - name: minDomains + type: + scalar: numeric + - name: nodeAffinityPolicy + type: + scalar: string + - name: nodeTaintsPolicy + type: + scalar: string + - name: topologyKey + type: + scalar: string + default: "" + - name: whenUnsatisfiable + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.TypedLocalObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.TypedObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string +- name: io.k8s.api.core.v1.VolumeResourceRequirements + map: + fields: + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.apimachinery.pkg.api.resource.Quantity scalar: untyped - name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index 70957eee8..afce6aef5 100644 --- 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -28,6 +28,7 @@ type ConfigV1Interface interface { ImageTagMirrorSetsGetter InfrastructuresGetter IngressesGetter + InsightsDataGathersGetter NetworksGetter NodesGetter OAuthsGetter @@ -106,6 +107,10 @@ func (c *ConfigV1Client) Ingresses() IngressInterface { return newIngresses(c) } +func (c *ConfigV1Client) InsightsDataGathers() InsightsDataGatherInterface { + return newInsightsDataGathers(c) +} + func (c *ConfigV1Client) Networks() NetworkInterface { return newNetworks(c) } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index 44ad19dcb..27c5fd110 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -34,6 +34,8 @@ type InfrastructureExpansion interface{} type IngressExpansion interface{} +type InsightsDataGatherExpansion interface{} + type NetworkExpansion interface{} type NodeExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go new file mode 100644 index 000000000..43f662012 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/insightsdatagather.go @@ -0,0 +1,54 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface. +// A group's client should implement this interface. +type InsightsDataGathersGetter interface { + InsightsDataGathers() InsightsDataGatherInterface +} + +// InsightsDataGatherInterface has methods to work with InsightsDataGather resources. 
+type InsightsDataGatherInterface interface { + Create(ctx context.Context, insightsDataGather *configv1.InsightsDataGather, opts metav1.CreateOptions) (*configv1.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *configv1.InsightsDataGather, opts metav1.UpdateOptions) (*configv1.InsightsDataGather, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*configv1.InsightsDataGather, error) + List(ctx context.Context, opts metav1.ListOptions) (*configv1.InsightsDataGatherList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *configv1.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration, opts metav1.ApplyOptions) (result *configv1.InsightsDataGather, err error) + InsightsDataGatherExpansion +} + +// insightsDataGathers implements InsightsDataGatherInterface +type insightsDataGathers struct { + *gentype.ClientWithListAndApply[*configv1.InsightsDataGather, *configv1.InsightsDataGatherList, *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration] +} + +// newInsightsDataGathers returns a InsightsDataGathers +func newInsightsDataGathers(c *ConfigV1Client) *insightsDataGathers { + return &insightsDataGathers{ + gentype.NewClientWithListAndApply[*configv1.InsightsDataGather, *configv1.InsightsDataGatherList, *applyconfigurationsconfigv1.InsightsDataGatherApplyConfiguration]( + "insightsdatagathers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *configv1.InsightsDataGather { return &configv1.InsightsDataGather{} }, + func() *configv1.InsightsDataGatherList { return &configv1.InsightsDataGatherList{} }, + ), + } +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go new file mode 100644 index 000000000..53a173991 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/insightsdatagather.go @@ -0,0 +1,85 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + context "context" + time "time" + + apiconfigv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + configv1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherInformer provides access to a shared informer and lister for +// InsightsDataGathers. +type InsightsDataGatherInformer interface { + Informer() cache.SharedIndexInformer + Lister() configv1.InsightsDataGatherLister +} + +type insightsDataGatherInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().List(context.Background(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().Watch(context.Background(), options) + }, + ListWithContextFunc: func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().List(ctx, options) + }, + WatchFuncWithContext: func(ctx context.Context, options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().InsightsDataGathers().Watch(ctx, options) + }, + }, + &apiconfigv1.InsightsDataGather{}, + resyncPeriod, + indexers, + ) +} + +func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiconfigv1.InsightsDataGather{}, f.defaultInformer) +} + +func (f *insightsDataGatherInformer) Lister() configv1.InsightsDataGatherLister { + return configv1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go index ff4c521b0..0ad1b98f3 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -40,6 +40,8 @@ type Interface interface { Infrastructures() InfrastructureInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer + // InsightsDataGathers returns a InsightsDataGatherInformer. + InsightsDataGathers() InsightsDataGatherInformer // Networks returns a NetworkInformer. Networks() NetworkInformer // Nodes returns a NodeInformer. 
@@ -147,6 +149,11 @@ func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// InsightsDataGathers returns a InsightsDataGatherInformer. +func (v *version) InsightsDataGathers() InsightsDataGatherInformer { + return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Networks returns a NetworkInformer. func (v *version) Networks() NetworkInformer { return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 59c98ea77..146e7e975 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -71,6 +71,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Infrastructures().Informer()}, nil case v1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Ingresses().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("insightsdatagathers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().InsightsDataGathers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("networks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Networks().Informer()}, nil case v1.SchemeGroupVersion.WithResource("nodes"): diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go index d4e79cd0e..ca93cb283 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -70,6 +70,10 @@ type InfrastructureListerExpansion interface{} // IngressLister. type IngressListerExpansion interface{} +// InsightsDataGatherListerExpansion allows custom methods to be added to +// InsightsDataGatherLister. +type InsightsDataGatherListerExpansion interface{} + // NetworkListerExpansion allows custom methods to be added to // NetworkLister. type NetworkListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go new file mode 100644 index 000000000..79da7823f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/insightsdatagather.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherLister helps list InsightsDataGathers. +// All objects returned here must be treated as read-only. +type InsightsDataGatherLister interface { + // List lists all InsightsDataGathers in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*configv1.InsightsDataGather, err error) + // Get retrieves the InsightsDataGather from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*configv1.InsightsDataGather, error) + InsightsDataGatherListerExpansion +} + +// insightsDataGatherLister implements the InsightsDataGatherLister interface. +type insightsDataGatherLister struct { + listers.ResourceIndexer[*configv1.InsightsDataGather] +} + +// NewInsightsDataGatherLister returns a new InsightsDataGatherLister. +func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { + return &insightsDataGatherLister{listers.New[*configv1.InsightsDataGather](indexer, configv1.Resource("insightsdatagather"))} +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index ddec4b73e..fa2661459 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -6,7 +6,7 @@ import ( fmt "fmt" sync "sync" - typed "sigs.k8s.io/structured-merge-diff/v4/typed" + typed "sigs.k8s.io/structured-merge-diff/v6/typed" ) func Parser() *typed.Parser { @@ -389,6 +389,42 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.operator.v1.BootImageSkewEnforcementConfig + map: + fields: + - name: manual + type: + namedType: com.github.openshift.api.operator.v1.ClusterBootImageManual + default: {} + - name: mode + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: manual + discriminatorValue: Manual +- name: com.github.openshift.api.operator.v1.BootImageSkewEnforcementStatus + map: + fields: + - name: automatic + type: + namedType: com.github.openshift.api.operator.v1.ClusterBootImageAutomatic + default: {} + - name: manual + type: + namedType: com.github.openshift.api.operator.v1.ClusterBootImageManual + default: {} + - name: mode + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: automatic + discriminatorValue: Automatic + - fieldName: manual + discriminatorValue: Manual - name: com.github.openshift.api.operator.v1.CSIDriverConfigSpec map: fields: @@ -614,6 +650,34 @@ var schemaYAML = typed.YAMLObject(`types: - name: version type: scalar: string +- name: com.github.openshift.api.operator.v1.ClusterBootImageAutomatic + map: + fields: + - name: ocpVersion + type: + scalar: string + - name: rhcosVersion + type: + scalar: string +- name: com.github.openshift.api.operator.v1.ClusterBootImageManual + map: + fields: + - name: mode + type: + scalar: string + - name: ocpVersion + type: + scalar: string + - name: rhcosVersion + type: + scalar: string + unions: + - discriminator: mode + fields: + - fieldName: ocpVersion + discriminatorValue: OCPVersion + - fieldName: rhcosVersion + discriminatorValue: RHCOSVersion - name: com.github.openshift.api.operator.v1.ClusterCSIDriver map: fields: @@ -2026,6 +2090,15 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: com.github.openshift.api.operator.v1.HealthCheck elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.IrreconcilableValidationOverrides + map: + fields: + - name: storage + type: + list: + elementType: + scalar: string + elementRelationship: associative - name: 
com.github.openshift.api.operator.v1.KubeAPIServer map: fields: @@ -2050,6 +2123,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.KubeAPIServerSpec map: fields: + - name: eventTTLMinutes + type: + scalar: numeric - name: failedRevisionLimit type: scalar: numeric @@ -2496,6 +2572,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.MachineConfigurationSpec map: fields: + - name: bootImageSkewEnforcement + type: + namedType: com.github.openshift.api.operator.v1.BootImageSkewEnforcementConfig + default: {} - name: failedRevisionLimit type: scalar: numeric @@ -2503,6 +2583,10 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: irreconcilableValidationOverrides + type: + namedType: com.github.openshift.api.operator.v1.IrreconcilableValidationOverrides + default: {} - name: logLevel type: scalar: string @@ -2533,6 +2617,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.MachineConfigurationStatus map: fields: + - name: bootImageSkewEnforcementStatus + type: + namedType: com.github.openshift.api.operator.v1.BootImageSkewEnforcementStatus + default: {} - name: conditions type: list: diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go index ec839a2ff..5d99e84b8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/authentication.go @@ -64,6 +64,7 @@ func extractAuthentication(authentication *operatorv1.Authentication, fieldManag b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b AuthenticationApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *AuthenticationApplyConfiguration) WithStatus(value *AuthenticationStatu return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *AuthenticationApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *AuthenticationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *AuthenticationApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementconfig.go new file mode 100644 index 000000000..b5b826de0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// BootImageSkewEnforcementConfigApplyConfiguration represents a declarative configuration of the BootImageSkewEnforcementConfig type for use +// with apply. +type BootImageSkewEnforcementConfigApplyConfiguration struct { + Mode *operatorv1.BootImageSkewEnforcementConfigMode `json:"mode,omitempty"` + Manual *ClusterBootImageManualApplyConfiguration `json:"manual,omitempty"` +} + +// BootImageSkewEnforcementConfigApplyConfiguration constructs a declarative configuration of the BootImageSkewEnforcementConfig type for use with +// apply. +func BootImageSkewEnforcementConfig() *BootImageSkewEnforcementConfigApplyConfiguration { + return &BootImageSkewEnforcementConfigApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *BootImageSkewEnforcementConfigApplyConfiguration) WithMode(value operatorv1.BootImageSkewEnforcementConfigMode) *BootImageSkewEnforcementConfigApplyConfiguration { + b.Mode = &value + return b +} + +// WithManual sets the Manual field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Manual field is set to the value of the last call. +func (b *BootImageSkewEnforcementConfigApplyConfiguration) WithManual(value *ClusterBootImageManualApplyConfiguration) *BootImageSkewEnforcementConfigApplyConfiguration { + b.Manual = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementstatus.go new file mode 100644 index 000000000..ae50d44ca --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bootimageskewenforcementstatus.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// BootImageSkewEnforcementStatusApplyConfiguration represents a declarative configuration of the BootImageSkewEnforcementStatus type for use +// with apply. 
+type BootImageSkewEnforcementStatusApplyConfiguration struct { + Mode *operatorv1.BootImageSkewEnforcementModeStatus `json:"mode,omitempty"` + Automatic *ClusterBootImageAutomaticApplyConfiguration `json:"automatic,omitempty"` + Manual *ClusterBootImageManualApplyConfiguration `json:"manual,omitempty"` +} + +// BootImageSkewEnforcementStatusApplyConfiguration constructs a declarative configuration of the BootImageSkewEnforcementStatus type for use with +// apply. +func BootImageSkewEnforcementStatus() *BootImageSkewEnforcementStatusApplyConfiguration { + return &BootImageSkewEnforcementStatusApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *BootImageSkewEnforcementStatusApplyConfiguration) WithMode(value operatorv1.BootImageSkewEnforcementModeStatus) *BootImageSkewEnforcementStatusApplyConfiguration { + b.Mode = &value + return b +} + +// WithAutomatic sets the Automatic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Automatic field is set to the value of the last call. +func (b *BootImageSkewEnforcementStatusApplyConfiguration) WithAutomatic(value *ClusterBootImageAutomaticApplyConfiguration) *BootImageSkewEnforcementStatusApplyConfiguration { + b.Automatic = value + return b +} + +// WithManual sets the Manual field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Manual field is set to the value of the last call. +func (b *BootImageSkewEnforcementStatusApplyConfiguration) WithManual(value *ClusterBootImageManualApplyConfiguration) *BootImageSkewEnforcementStatusApplyConfiguration { + b.Manual = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go index 148c6a440..4886cce3d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/cloudcredential.go @@ -64,6 +64,7 @@ func extractCloudCredential(cloudCredential *operatorv1.CloudCredential, fieldMa b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b CloudCredentialApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *CloudCredentialApplyConfiguration) WithStatus(value *CloudCredentialSta return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *CloudCredentialApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *CloudCredentialApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *CloudCredentialApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *CloudCredentialApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimageautomatic.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimageautomatic.go new file mode 100644 index 000000000..58e7aa3ec --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimageautomatic.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterBootImageAutomaticApplyConfiguration represents a declarative configuration of the ClusterBootImageAutomatic type for use +// with apply. +type ClusterBootImageAutomaticApplyConfiguration struct { + OCPVersion *string `json:"ocpVersion,omitempty"` + RHCOSVersion *string `json:"rhcosVersion,omitempty"` +} + +// ClusterBootImageAutomaticApplyConfiguration constructs a declarative configuration of the ClusterBootImageAutomatic type for use with +// apply. +func ClusterBootImageAutomatic() *ClusterBootImageAutomaticApplyConfiguration { + return &ClusterBootImageAutomaticApplyConfiguration{} +} + +// WithOCPVersion sets the OCPVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OCPVersion field is set to the value of the last call. +func (b *ClusterBootImageAutomaticApplyConfiguration) WithOCPVersion(value string) *ClusterBootImageAutomaticApplyConfiguration { + b.OCPVersion = &value + return b +} + +// WithRHCOSVersion sets the RHCOSVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RHCOSVersion field is set to the value of the last call. +func (b *ClusterBootImageAutomaticApplyConfiguration) WithRHCOSVersion(value string) *ClusterBootImageAutomaticApplyConfiguration { + b.RHCOSVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimagemanual.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimagemanual.go new file mode 100644 index 000000000..0251eaef5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clusterbootimagemanual.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ClusterBootImageManualApplyConfiguration represents a declarative configuration of the ClusterBootImageManual type for use +// with apply. 
+type ClusterBootImageManualApplyConfiguration struct { + Mode *operatorv1.ClusterBootImageManualMode `json:"mode,omitempty"` + OCPVersion *string `json:"ocpVersion,omitempty"` + RHCOSVersion *string `json:"rhcosVersion,omitempty"` +} + +// ClusterBootImageManualApplyConfiguration constructs a declarative configuration of the ClusterBootImageManual type for use with +// apply. +func ClusterBootImageManual() *ClusterBootImageManualApplyConfiguration { + return &ClusterBootImageManualApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. +func (b *ClusterBootImageManualApplyConfiguration) WithMode(value operatorv1.ClusterBootImageManualMode) *ClusterBootImageManualApplyConfiguration { + b.Mode = &value + return b +} + +// WithOCPVersion sets the OCPVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OCPVersion field is set to the value of the last call. +func (b *ClusterBootImageManualApplyConfiguration) WithOCPVersion(value string) *ClusterBootImageManualApplyConfiguration { + b.OCPVersion = &value + return b +} + +// WithRHCOSVersion sets the RHCOSVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RHCOSVersion field is set to the value of the last call. +func (b *ClusterBootImageManualApplyConfiguration) WithRHCOSVersion(value string) *ClusterBootImageManualApplyConfiguration { + b.RHCOSVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go index ed2dbb9c1..7bed556c6 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriver.go @@ -64,6 +64,7 @@ func extractClusterCSIDriver(clusterCSIDriver *operatorv1.ClusterCSIDriver, fiel b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ClusterCSIDriverApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ClusterCSIDriverApplyConfiguration) WithStatus(value *ClusterCSIDriverS return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ClusterCSIDriverApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ClusterCSIDriverApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *ClusterCSIDriverApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ClusterCSIDriverApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go index b884322ae..2753a7571 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/config.go @@ -64,6 +64,7 @@ func extractConfig(config *operatorv1.Config, fieldManager string, subresource s b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ConfigApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ConfigApplyConfiguration) WithStatus(value *ConfigStatusApplyConfigurat return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ConfigApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ConfigApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConfigApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ConfigApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go index aaa69f64c..0ed0fe996 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/console.go @@ -64,6 +64,7 @@ func extractConsole(console *operatorv1.Console, fieldManager string, subresourc b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ConsoleApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ConsoleApplyConfiguration) WithStatus(value *ConsoleStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *ConsoleApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ConsoleApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ConsoleApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go index d9b93f620..10bdf7971 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csisnapshotcontroller.go @@ -64,6 +64,7 @@ func extractCSISnapshotController(cSISnapshotController *operatorv1.CSISnapshotC b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b CSISnapshotControllerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *CSISnapshotControllerApplyConfiguration) WithStatus(value *CSISnapshotC return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *CSISnapshotControllerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *CSISnapshotControllerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *CSISnapshotControllerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *CSISnapshotControllerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go index 7b2cb3d36..f4d4a346e 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/dns.go @@ -64,6 +64,7 @@ func extractDNS(dNS *operatorv1.DNS, fieldManager string, subresource string) (* b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b DNSApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
@@ -239,8 +240,24 @@ func (b *DNSApplyConfiguration) WithStatus(value *DNSStatusApplyConfiguration) * return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *DNSApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *DNSApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *DNSApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *DNSApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go index de118401e..3a4467bf0 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/etcd.go @@ -64,6 +64,7 @@ func extractEtcd(etcd *operatorv1.Etcd, fieldManager string, subresource string) b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b EtcdApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *EtcdApplyConfiguration) WithStatus(value *EtcdStatusApplyConfiguration) return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *EtcdApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *EtcdApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *EtcdApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *EtcdApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go index e23139014..208dadbf0 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontroller.go @@ -66,6 +66,7 @@ func extractIngressController(ingressController *operatorv1.IngressController, f b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b IngressControllerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -241,8 +242,24 @@ func (b *IngressControllerApplyConfiguration) WithStatus(value *IngressControlle return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *IngressControllerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *IngressControllerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *IngressControllerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *IngressControllerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go index b694f1ca3..eb7d00707 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsoperator.go @@ -64,6 +64,7 @@ func extractInsightsOperator(insightsOperator *operatorv1.InsightsOperator, fiel b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b InsightsOperatorApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *InsightsOperatorApplyConfiguration) WithStatus(value *InsightsOperatorS return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *InsightsOperatorApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *InsightsOperatorApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *InsightsOperatorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *InsightsOperatorApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/irreconcilablevalidationoverrides.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/irreconcilablevalidationoverrides.go new file mode 100644 index 000000000..8f9aaf48f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/irreconcilablevalidationoverrides.go @@ -0,0 +1,29 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// IrreconcilableValidationOverridesApplyConfiguration represents a declarative configuration of the IrreconcilableValidationOverrides type for use +// with apply. +type IrreconcilableValidationOverridesApplyConfiguration struct { + Storage []operatorv1.IrreconcilableValidationOverridesStorage `json:"storage,omitempty"` +} + +// IrreconcilableValidationOverridesApplyConfiguration constructs a declarative configuration of the IrreconcilableValidationOverrides type for use with +// apply. +func IrreconcilableValidationOverrides() *IrreconcilableValidationOverridesApplyConfiguration { + return &IrreconcilableValidationOverridesApplyConfiguration{} +} + +// WithStorage adds the given value to the Storage field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Storage field. +func (b *IrreconcilableValidationOverridesApplyConfiguration) WithStorage(values ...operatorv1.IrreconcilableValidationOverridesStorage) *IrreconcilableValidationOverridesApplyConfiguration { + for i := range values { + b.Storage = append(b.Storage, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go index 8ff4292bc..ece70f01f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserver.go @@ -64,6 +64,7 @@ func extractKubeAPIServer(kubeAPIServer *operatorv1.KubeAPIServer, fieldManager b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b KubeAPIServerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *KubeAPIServerApplyConfiguration) WithStatus(value *KubeAPIServerStatusA return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *KubeAPIServerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *KubeAPIServerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *KubeAPIServerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go index 71b60a95b..e0bf29f0d 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverspec.go @@ -11,6 +11,7 @@ import ( // with apply. type KubeAPIServerSpecApplyConfiguration struct { StaticPodOperatorSpecApplyConfiguration `json:",inline"` + EventTTLMinutes *int32 `json:"eventTTLMinutes,omitempty"` } // KubeAPIServerSpecApplyConfiguration constructs a declarative configuration of the KubeAPIServerSpec type for use with @@ -82,3 +83,11 @@ func (b *KubeAPIServerSpecApplyConfiguration) WithSucceededRevisionLimit(value i b.StaticPodOperatorSpecApplyConfiguration.SucceededRevisionLimit = &value return b } + +// WithEventTTLMinutes sets the EventTTLMinutes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EventTTLMinutes field is set to the value of the last call. +func (b *KubeAPIServerSpecApplyConfiguration) WithEventTTLMinutes(value int32) *KubeAPIServerSpecApplyConfiguration { + b.EventTTLMinutes = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go index 731b6793a..696ce58b9 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubecontrollermanager.go @@ -64,6 +64,7 @@ func extractKubeControllerManager(kubeControllerManager *operatorv1.KubeControll b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b KubeControllerManagerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *KubeControllerManagerApplyConfiguration) WithStatus(value *KubeControll return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *KubeControllerManagerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *KubeControllerManagerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *KubeControllerManagerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go index 77e6ca343..81d44d065 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubescheduler.go @@ -64,6 +64,7 @@ func extractKubeScheduler(kubeScheduler *operatorv1.KubeScheduler, fieldManager b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b KubeSchedulerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *KubeSchedulerApplyConfiguration) WithStatus(value *KubeSchedulerStatusA return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *KubeSchedulerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *KubeSchedulerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeSchedulerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *KubeSchedulerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go index 5c84a133f..a645e0d3c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubestorageversionmigrator.go @@ -64,6 +64,7 @@ func extractKubeStorageVersionMigrator(kubeStorageVersionMigrator *operatorv1.Ku b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b KubeStorageVersionMigratorApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *KubeStorageVersionMigratorApplyConfiguration) WithStatus(value *KubeSto return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *KubeStorageVersionMigratorApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *KubeStorageVersionMigratorApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *KubeStorageVersionMigratorApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *KubeStorageVersionMigratorApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go index 35d2b867e..c4b20a473 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfiguration.go @@ -64,6 +64,7 @@ func extractMachineConfiguration(machineConfiguration *operatorv1.MachineConfigu b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b MachineConfigurationApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *MachineConfigurationApplyConfiguration) WithStatus(value *MachineConfig return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *MachineConfigurationApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *MachineConfigurationApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *MachineConfigurationApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *MachineConfigurationApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go index cee3c69fc..5c2e007db 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationspec.go @@ -11,8 +11,10 @@ import ( // with apply. type MachineConfigurationSpecApplyConfiguration struct { StaticPodOperatorSpecApplyConfiguration `json:",inline"` - ManagedBootImages *ManagedBootImagesApplyConfiguration `json:"managedBootImages,omitempty"` - NodeDisruptionPolicy *NodeDisruptionPolicyConfigApplyConfiguration `json:"nodeDisruptionPolicy,omitempty"` + ManagedBootImages *ManagedBootImagesApplyConfiguration `json:"managedBootImages,omitempty"` + NodeDisruptionPolicy *NodeDisruptionPolicyConfigApplyConfiguration `json:"nodeDisruptionPolicy,omitempty"` + IrreconcilableValidationOverrides *IrreconcilableValidationOverridesApplyConfiguration `json:"irreconcilableValidationOverrides,omitempty"` + BootImageSkewEnforcement *BootImageSkewEnforcementConfigApplyConfiguration `json:"bootImageSkewEnforcement,omitempty"` } // MachineConfigurationSpecApplyConfiguration constructs a declarative configuration of the MachineConfigurationSpec type for use with @@ -100,3 +102,19 @@ func (b *MachineConfigurationSpecApplyConfiguration) WithNodeDisruptionPolicy(va b.NodeDisruptionPolicy = value return b } + +// WithIrreconcilableValidationOverrides sets the IrreconcilableValidationOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IrreconcilableValidationOverrides field is set to the value of the last call. +func (b *MachineConfigurationSpecApplyConfiguration) WithIrreconcilableValidationOverrides(value *IrreconcilableValidationOverridesApplyConfiguration) *MachineConfigurationSpecApplyConfiguration { + b.IrreconcilableValidationOverrides = value + return b +} + +// WithBootImageSkewEnforcement sets the BootImageSkewEnforcement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BootImageSkewEnforcement field is set to the value of the last call. 
+func (b *MachineConfigurationSpecApplyConfiguration) WithBootImageSkewEnforcement(value *BootImageSkewEnforcementConfigApplyConfiguration) *MachineConfigurationSpecApplyConfiguration { + b.BootImageSkewEnforcement = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go index 073ca7c54..f94d6fa7f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/machineconfigurationstatus.go @@ -9,10 +9,11 @@ import ( // MachineConfigurationStatusApplyConfiguration represents a declarative configuration of the MachineConfigurationStatus type for use // with apply. type MachineConfigurationStatusApplyConfiguration struct { - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` - NodeDisruptionPolicyStatus *NodeDisruptionPolicyStatusApplyConfiguration `json:"nodeDisruptionPolicyStatus,omitempty"` - ManagedBootImagesStatus *ManagedBootImagesApplyConfiguration `json:"managedBootImagesStatus,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + NodeDisruptionPolicyStatus *NodeDisruptionPolicyStatusApplyConfiguration `json:"nodeDisruptionPolicyStatus,omitempty"` + ManagedBootImagesStatus *ManagedBootImagesApplyConfiguration `json:"managedBootImagesStatus,omitempty"` + BootImageSkewEnforcementStatus *BootImageSkewEnforcementStatusApplyConfiguration `json:"bootImageSkewEnforcementStatus,omitempty"` } // MachineConfigurationStatusApplyConfiguration constructs a declarative configuration of the MachineConfigurationStatus type for use with @@ -57,3 +58,11 @@ func (b *MachineConfigurationStatusApplyConfiguration) WithManagedBootImagesStat b.ManagedBootImagesStatus = value return b } + +// WithBootImageSkewEnforcementStatus sets the BootImageSkewEnforcementStatus field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BootImageSkewEnforcementStatus field is set to the value of the last call. 
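A brief aside on how these generated additions are typically consumed: the patch repeatedly adds GetKind/GetAPIVersion/GetName/GetNamespace accessors plus With* setters for the new MachineConfiguration fields. The sketch below shows one plausible caller. Only the With*/Get* methods appear in the hunks above; the constructors (MachineConfiguration, MachineConfigurationSpec, BootImageSkewEnforcementConfig) and WithSpec are assumed from the usual applyconfiguration codegen pattern, so treat those names as assumptions rather than confirmed API.

```go
// Sketch only: With*/Get* methods are from this patch; the constructors and
// WithSpec are assumed from the standard applyconfiguration codegen pattern.
package main

import (
	"fmt"

	operatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
)

// deref prints a placeholder for unset optional fields.
func deref(s *string) string {
	if s == nil {
		return "<unset>"
	}
	return *s
}

func main() {
	// Chain the new spec setter added in this bump (constructor names assumed).
	spec := operatorv1.MachineConfigurationSpec().
		WithBootImageSkewEnforcement(operatorv1.BootImageSkewEnforcementConfig())

	// Assumed constructor; generated constructors normally set Kind, APIVersion, and Name.
	mc := operatorv1.MachineConfiguration("cluster").WithSpec(spec)

	// The new accessors let generic helpers read object identity without type switches.
	fmt.Println(deref(mc.GetKind()), deref(mc.GetAPIVersion()), deref(mc.GetName()), deref(mc.GetNamespace()))
}
```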
+func (b *MachineConfigurationStatusApplyConfiguration) WithBootImageSkewEnforcementStatus(value *BootImageSkewEnforcementStatusApplyConfiguration) *MachineConfigurationStatusApplyConfiguration { + b.BootImageSkewEnforcementStatus = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go index 0bdf453af..de6d39b40 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/network.go @@ -64,6 +64,7 @@ func extractNetwork(network *operatorv1.Network, fieldManager string, subresourc b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b NetworkApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *NetworkApplyConfiguration) WithStatus(value *NetworkStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *NetworkApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *NetworkApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go index 93b3c4e79..00aca4601 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go @@ -64,6 +64,7 @@ func extractOLM(oLM *operatorv1.OLM, fieldManager string, subresource string) (* b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b OLMApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *OLMApplyConfiguration) WithStatus(value *OLMStatusApplyConfiguration) * return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *OLMApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *OLMApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. 
func (b *OLMApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OLMApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go index f78286043..5ce0bcb87 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftapiserver.go @@ -64,6 +64,7 @@ func extractOpenShiftAPIServer(openShiftAPIServer *operatorv1.OpenShiftAPIServer b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b OpenShiftAPIServerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *OpenShiftAPIServerApplyConfiguration) WithStatus(value *OpenShiftAPISer return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *OpenShiftAPIServerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *OpenShiftAPIServerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *OpenShiftAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OpenShiftAPIServerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go index d8dbb4848..09a7d0585 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openshiftcontrollermanager.go @@ -64,6 +64,7 @@ func extractOpenShiftControllerManager(openShiftControllerManager *operatorv1.Op b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b OpenShiftControllerManagerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *OpenShiftControllerManagerApplyConfiguration) WithStatus(value *OpenShi return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. 
+func (b *OpenShiftControllerManagerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *OpenShiftControllerManagerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *OpenShiftControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *OpenShiftControllerManagerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go index bb0757381..bda7d6b21 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceca.go @@ -64,6 +64,7 @@ func extractServiceCA(serviceCA *operatorv1.ServiceCA, fieldManager string, subr b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ServiceCAApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ServiceCAApplyConfiguration) WithStatus(value *ServiceCAStatusApplyConf return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ServiceCAApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ServiceCAApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCAApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. 
+func (b *ServiceCAApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go index 52981ca86..ca7eebebe 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogapiserver.go @@ -64,6 +64,7 @@ func extractServiceCatalogAPIServer(serviceCatalogAPIServer *operatorv1.ServiceC b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ServiceCatalogAPIServerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ServiceCatalogAPIServerApplyConfiguration) WithStatus(value *ServiceCat return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ServiceCatalogAPIServerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *ServiceCatalogAPIServerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCatalogAPIServerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ServiceCatalogAPIServerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go index f01957710..03d644ead 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/servicecatalogcontrollermanager.go @@ -64,6 +64,7 @@ func extractServiceCatalogControllerManager(serviceCatalogControllerManager *ope b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b ServiceCatalogControllerManagerApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *ServiceCatalogControllerManagerApplyConfiguration) WithStatus(value *Se return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *ServiceCatalogControllerManagerApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. 
+func (b *ServiceCatalogControllerManagerApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *ServiceCatalogControllerManagerApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *ServiceCatalogControllerManagerApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go index fe464c41e..9a82f4bcf 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storage.go @@ -64,6 +64,7 @@ func extractStorage(storage *operatorv1.Storage, fieldManager string, subresourc b.WithAPIVersion("operator.openshift.io/v1") return b, nil } +func (b StorageApplyConfiguration) IsApplyConfiguration() {} // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. @@ -239,8 +240,24 @@ func (b *StorageApplyConfiguration) WithStatus(value *StorageStatusApplyConfigur return b } +// GetKind retrieves the value of the Kind field in the declarative configuration. +func (b *StorageApplyConfiguration) GetKind() *string { + return b.TypeMetaApplyConfiguration.Kind +} + +// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration. +func (b *StorageApplyConfiguration) GetAPIVersion() *string { + return b.TypeMetaApplyConfiguration.APIVersion +} + // GetName retrieves the value of the Name field in the declarative configuration. func (b *StorageApplyConfiguration) GetName() *string { b.ensureObjectMetaApplyConfigurationExists() return b.ObjectMetaApplyConfiguration.Name } + +// GetNamespace retrieves the value of the Namespace field in the declarative configuration. +func (b *StorageApplyConfiguration) GetNamespace() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Namespace +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/testutils/Makefile b/vendor/github.com/openshift/cluster-api-actuator-pkg/testutils/Makefile index d9c503ed9..3e90858a6 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/testutils/Makefile +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/testutils/Makefile @@ -6,7 +6,7 @@ GOPROXY ?= export GOPROXY # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
-ENVTEST_K8S_VERSION = 1.28 +ENVTEST_K8S_VERSION = 1.33.2 PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) ENVTEST = go run ${PROJECT_DIR}/vendor/sigs.k8s.io/controller-runtime/tools/setup-envtest @@ -27,7 +27,7 @@ lint: ## Go lint your code .PHONY: unit unit: ## Run unit tests - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path --bin-dir $(PROJECT_DIR)/bin)" ./hack/test.sh + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path --bin-dir $(PROJECT_DIR)/bin --index https://raw.githubusercontent.com/openshift/api/master/envtest-releases.yaml)" ./hack/test.sh .PHONY: help help: diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go index 1709b2395..3898b9824 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go @@ -101,7 +101,7 @@ func GetStatusDiff(oldStatus configv1.ClusterOperatorStatus, newStatus configv1. json.NewEncoder(originalJSON).Encode(oldStatus) newJSON := &bytes.Buffer{} json.NewEncoder(newJSON).Encode(newStatus) - messages = append(messages, diff.StringDiff(originalJSON.String(), newJSON.String())) + messages = append(messages, diff.Diff(originalJSON.String(), newJSON.String())) } return strings.Join(messages, ",") diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go index 0d6ffe673..f9ad1fc14 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go @@ -6,6 +6,7 @@ import ( "time" operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "github.com/openshift/library-go/pkg/controller/factory" @@ -82,7 +83,22 @@ func NewCertRotationController( return factory.New(). ResyncEvery(time.Minute). WithSync(c.Sync). 
- WithInformers( + WithFilteredEventsInformers( + func(obj interface{}) bool { + if cm, ok := obj.(*corev1.ConfigMap); ok { + return cm.Namespace == caBundleConfigMap.Namespace && cm.Name == caBundleConfigMap.Name + } + if secret, ok := obj.(*corev1.Secret); ok { + if secret.Namespace == rotatedSigningCASecret.Namespace && secret.Name == rotatedSigningCASecret.Name { + return true + } + if secret.Namespace == rotatedSelfSignedCertKeySecret.Namespace && secret.Name == rotatedSelfSignedCertKeySecret.Name { + return true + } + return false + } + return true + }, rotatedSigningCASecret.Informer.Informer(), caBundleConfigMap.Informer.Informer(), rotatedSelfSignedCertKeySecret.Informer.Informer(), @@ -122,14 +138,22 @@ func (c CertRotationController) getSigningCertKeyPairLocation() string { func (c CertRotationController) SyncWorker(ctx context.Context) error { signingCertKeyPair, _, err := c.RotatedSigningCASecret.EnsureSigningCertKeyPair(ctx) - if err != nil || signingCertKeyPair == nil { + if err != nil { return err } + // If no signingCertKeyPair returned due to update conflict or otherwise, return an error + if signingCertKeyPair == nil { + return fmt.Errorf("signingCertKeyPair is nil") + } cabundleCerts, err := c.CABundleConfigMap.EnsureConfigMapCABundle(ctx, signingCertKeyPair, c.getSigningCertKeyPairLocation()) if err != nil { return err } + // If no ca bundle returned due to update conflict or otherwise, return an error + if cabundleCerts == nil { + return fmt.Errorf("cabundleCerts is nil") + } if _, err := c.RotatedSelfSignedCertKeySecret.EnsureTargetCertKeyPair(ctx, signingCertKeyPair, cabundleCerts); err != nil { return err diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go index 94ed01d7f..88cd41189 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go @@ -164,6 +164,11 @@ func needNewTargetCertKeyPair(secret *corev1.Secret, signer *crypto.CA, caBundle return reason } + // Exit early if we're only refreshing when expired and the certificate does not need an update + if refreshOnlyWhenExpired { + return "" + } + // check the signer common name against all the common names in our ca bundle so we don't refresh early signerCommonName := annotations[CertificateIssuer] if len(signerCommonName) == 0 { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go index 418224998..68123d0e8 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -202,7 +202,7 @@ func (c ConfigObserver) sync(ctx context.Context, syncCtx factory.SyncContext) e func (c ConfigObserver) updateObservedConfig(ctx context.Context, syncCtx factory.SyncContext, existingConfig map[string]interface{}, mergedObservedConfig map[string]interface{}) error { if len(c.nestedConfigPath) == 0 { if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) { - syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig)) + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated observed config: %v", 
diff.Diff(existingConfig, mergedObservedConfig)) return c.updateConfig(ctx, syncCtx, mergedObservedConfig, v1helpers.UpdateObservedConfigFn) } return nil @@ -217,7 +217,7 @@ func (c ConfigObserver) updateObservedConfig(ctx context.Context, syncCtx factor return fmt.Errorf("unable to extract the merged config under %v, err %v", c.nestedConfigPath, err) } if !equality.Semantic.DeepEqual(existingConfigNested, mergedObservedConfigNested) { - syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated section (%q) of observed config: %q", strings.Join(c.nestedConfigPath, "/"), diff.ObjectDiff(existingConfigNested, mergedObservedConfigNested)) + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated section (%q) of observed config: %q", strings.Join(c.nestedConfigPath, "/"), diff.Diff(existingConfigNested, mergedObservedConfigNested)) return c.updateConfig(ctx, syncCtx, mergedObservedConfigNested, c.updateNestedConfigHelper) } return nil diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go index 3199d2db0..d44a5d571 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go @@ -142,6 +142,9 @@ func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter if required.Annotations == nil { required.Annotations = map[string]string{} } + if required.Labels == nil { + required.Labels = map[string]string{} + } if err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec); err != nil { return nil, false, err } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go index 9832ede71..2953e3bd1 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/networking.go @@ -18,7 +18,7 @@ func init() { } func ReadNetworkPolicyV1OrDie(objBytes []byte) *networkingv1.NetworkPolicy { - requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(networkingv1.SchemeGroupVersion), objBytes) + requiredObj, err := runtime.Decode(netCodecs.UniversalDecoder(networkingv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go index cd3103124..4d38ef05d 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -32,19 +32,23 @@ type KubeInformersForNamespaces interface { var _ KubeInformersForNamespaces = kubeInformersForNamespaces{} -func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { +func NewKubeInformersForNamespacesWithResyncPeriod(kubeClient kubernetes.Interface, resyncInterval time.Duration, namespaces ...string) KubeInformersForNamespaces { ret := kubeInformersForNamespaces{} for _, namespace := range namespaces { if len(namespace) == 0 { - ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + ret[""] = informers.NewSharedInformerFactory(kubeClient, resyncInterval) continue } - 
ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncInterval, informers.WithNamespace(namespace)) } return ret } +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + return NewKubeInformersForNamespacesWithResyncPeriod(kubeClient, 10*time.Minute, namespaces...) +} + type kubeInformersForNamespaces map[string]informers.SharedInformerFactory // WaitForCacheSync waits for all started informers' cache were synced. diff --git a/vendor/github.com/raeperd/recvcheck/.gitignore b/vendor/github.com/raeperd/recvcheck/.gitignore index 035dc07e3..421267332 100644 --- a/vendor/github.com/raeperd/recvcheck/.gitignore +++ b/vendor/github.com/raeperd/recvcheck/.gitignore @@ -1,2 +1,3 @@ +.idea/ coverage.txt -cmd/recvcheck/recvcheck +/recvcheck diff --git a/vendor/github.com/raeperd/recvcheck/.golangci.yml b/vendor/github.com/raeperd/recvcheck/.golangci.yml new file mode 100644 index 000000000..18692d50b --- /dev/null +++ b/vendor/github.com/raeperd/recvcheck/.golangci.yml @@ -0,0 +1,10 @@ +linters: + enable: + - recvcheck + +output: + show-stats: true + sort-results: true + sort-order: + - linter + - file diff --git a/vendor/github.com/raeperd/recvcheck/Makefile b/vendor/github.com/raeperd/recvcheck/Makefile index 45ca47d9b..d78605a3b 100644 --- a/vendor/github.com/raeperd/recvcheck/Makefile +++ b/vendor/github.com/raeperd/recvcheck/Makefile @@ -1,12 +1,14 @@ -all: build test lint +.PHONY: clean lint test build -download: - go mod download +default: clean lint test build -build: download - go build -C cmd/recvcheck +clean: + rm -rf coverage.txt -test: +build: + go build -ldflags "-s -w" -trimpath ./cmd/recvcheck/ + +test: clean go test -race -coverprofile=coverage.txt . lint: diff --git a/vendor/github.com/raeperd/recvcheck/README.md b/vendor/github.com/raeperd/recvcheck/README.md index db84fe38e..067aa3c58 100644 --- a/vendor/github.com/raeperd/recvcheck/README.md +++ b/vendor/github.com/raeperd/recvcheck/README.md @@ -1,12 +1,12 @@ # recvcheck -[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck) [![codecov](https://codecov.io/gh/raeperd/recvcheck/graph/badge.svg?token=fPYgEHlq1e)](https://codecov.io/gh/raeperd/recvcheck) +[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck) Golang linter for check receiver type in method -## Motivtation +## Motivation From [Go Wiki: Go Code Review Comments - The Go Programming Language](https://go.dev/wiki/CodeReviewComments#receiver-type) > Don’t mix receiver types. Choose either pointers or struct types for all available method -Following code from [Dave Chenney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it? +Following code from [Dave Cheney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it? 
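The README's own code block falls outside the diff context shown in this hunk, so here is a comparable minimal illustration of the mixed-receiver pattern recvcheck reports. This is an invented example, not Dave Cheney's original.

```go
package main

import "sync"

// counter mixes receiver kinds: Add uses a pointer receiver, Value a value
// receiver. recvcheck reports the mix; the practical hazard is that calling
// Value copies the whole struct (mutex included), so the copy races with a
// concurrent Add that writes n under the original lock.
type counter struct {
	mu sync.Mutex
	n  int
}

func (c *counter) Add() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

func (c counter) Value() int { // value receiver: c is a copy
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.n
}

func main() {
	c := &counter{}
	go c.Add()
	_ = c.Value() // run with -race; the copy may race with the concurrent Add
}
```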
This linter does it for you. ```go diff --git a/vendor/github.com/raeperd/recvcheck/analyzer.go b/vendor/github.com/raeperd/recvcheck/analyzer.go index e80dfc577..11fb38e72 100644 --- a/vendor/github.com/raeperd/recvcheck/analyzer.go +++ b/vendor/github.com/raeperd/recvcheck/analyzer.go @@ -8,14 +8,58 @@ import ( "golang.org/x/tools/go/ast/inspector" ) -var Analyzer = &analysis.Analyzer{ - Name: "recvcheck", - Doc: "checks for receiver type consistency", - Run: run, - Requires: []*analysis.Analyzer{inspect.Analyzer}, +// NewAnalyzer returns a new analyzer to check for receiver type consistency. +func NewAnalyzer(s Settings) *analysis.Analyzer { + a := &analyzer{ + excluded: map[string]struct{}{}, + } + + if !s.DisableBuiltin { + // Default excludes for Marshal/Encode methods https://github.com/raeperd/recvcheck/issues/7 + a.excluded = map[string]struct{}{ + "*.MarshalText": {}, + "*.MarshalJSON": {}, + "*.MarshalYAML": {}, + "*.MarshalXML": {}, + "*.MarshalBinary": {}, + "*.GobEncode": {}, + } + } + + for _, exclusion := range s.Exclusions { + a.excluded[exclusion] = struct{}{} + } + + return &analysis.Analyzer{ + Name: "recvcheck", + Doc: "checks for receiver type consistency", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } } -func run(pass *analysis.Pass) (any, error) { +// Settings is the configuration for the analyzer. +type Settings struct { + // DisableBuiltin if true, disables the built-in method excludes. + // Built-in excluded methods: + // - "MarshalText" + // - "MarshalJSON" + // - "MarshalYAML" + // - "MarshalXML" + // - "MarshalBinary" + // - "GobEncode" + DisableBuiltin bool + + // Exclusions format is `struct_name.method_name` (ex: `Foo.MethodName`). + // A wildcard `*` can use as a struct name (ex: `*.MethodName`). 
+ Exclusions []string +} + +type analyzer struct { + excluded map[string]struct{} +} + +func (r *analyzer) run(pass *analysis.Pass) (any, error) { inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) structs := map[string]*structType{} @@ -25,45 +69,67 @@ func run(pass *analysis.Pass) (any, error) { return } - var recv *ast.Ident - var isStar bool - switch recvType := funcDecl.Recv.List[0].Type.(type) { - case *ast.StarExpr: - isStar = true - if recv, ok = recvType.X.(*ast.Ident); !ok { - return - } - case *ast.Ident: - recv = recvType - default: + recv, isStar := recvTypeIdent(funcDecl.Recv.List[0].Type) + if recv == nil { return } - var st *structType - st, ok = structs[recv.Name] + if r.isExcluded(recv, funcDecl) { + return + } + + st, ok := structs[recv.Name] if !ok { - structs[recv.Name] = &structType{recv: recv.Name} + structs[recv.Name] = &structType{} st = structs[recv.Name] } if isStar { - st.numStarMethod++ + st.starUsed = true } else { - st.numTypeMethod++ + st.typeUsed = true } }) - for _, st := range structs { - if st.numStarMethod > 0 && st.numTypeMethod > 0 { - pass.Reportf(pass.Pkg.Scope().Lookup(st.recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", st.recv) + for recv, st := range structs { + if st.starUsed && st.typeUsed { + pass.Reportf(pass.Pkg.Scope().Lookup(recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", recv) } } return nil, nil } +func (r *analyzer) isExcluded(recv *ast.Ident, f *ast.FuncDecl) bool { + if f.Name == nil || f.Name.Name == "" { + return true + } + + _, found := r.excluded[recv.Name+"."+f.Name.Name] + if found { + return true + } + + _, found = r.excluded["*."+f.Name.Name] + + return found +} + type structType struct { - recv string - numStarMethod int - numTypeMethod int + starUsed bool + typeUsed bool +} + +func recvTypeIdent(r ast.Expr) (*ast.Ident, bool) { + switch n := r.(type) { + case *ast.StarExpr: + if i, ok := n.X.(*ast.Ident); ok { + return i, true + } + + case *ast.Ident: + return n, false + } + + return nil, false } diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go b/vendor/github.com/rogpeppe/go-internal/diff/diff.go similarity index 98% rename from vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go rename to vendor/github.com/rogpeppe/go-internal/diff/diff.go index 6a40b23fc..9bd8bd781 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go +++ b/vendor/github.com/rogpeppe/go-internal/diff/diff.go @@ -74,7 +74,7 @@ func Diff(oldName string, old []byte, newName string, new []byte) []byte { continue } - // Expand matching lines as far as possible, + // Expand matching lines as far possible, // establishing that x[start.x:end.x] == y[start.y:end.y]. // Note that on the first (or last) iteration we may (or definitely do) // have an empty match: start.x==end.x and start.y==end.y. @@ -116,10 +116,7 @@ func Diff(oldName string, old []byte, newName string, new []byte) []byte { // End chunk with common lines for context. 
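Looping back to the recvcheck analyzer.go rewrite above: the package now exposes NewAnalyzer(Settings) instead of a package-level Analyzer. A hypothetical standalone wiring is sketched below, using only the Settings fields shown in the hunk; the exclusion entry is invented for illustration.

```go
// Hypothetical driver for the new recvcheck API; Settings fields are those
// shown in the patch, the exclusion value is made up.
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/raeperd/recvcheck"
)

func main() {
	analyzer := recvcheck.NewAnalyzer(recvcheck.Settings{
		DisableBuiltin: false,                     // keep the built-in Marshal*/GobEncode excludes
		Exclusions:     []string{"*.MarshalCBOR"}, // format: struct_name.method_name; "*" matches any struct
	})
	singlechecker.Main(analyzer)
}
```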
if len(ctext) > 0 { - n := end.x - start.x - if n > C { - n = C - } + n := min(end.x-start.x, C) for _, s := range x[start.x : start.x+n] { ctext = append(ctext, " "+s) count.x++ @@ -234,7 +231,7 @@ func tgs(x, y []string) []pair { for i := range T { T[i] = n + 1 } - for i := 0; i < n; i++ { + for i := range n { k := sort.Search(n, func(k int) bool { return T[k] >= J[i] }) diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc index 3ce7171a3..2e0f9f5f7 100644 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" fi use flake . --impure diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go index 754c8b260..ef8d54712 100644 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -27,7 +27,7 @@ type Finder struct { // It provides the capability to search for entries with depth, // meaning it can target deeper locations within the directory structure. // - // It also supports glob syntax (as defined by [filepat.Match]), offering greater flexibility in search patterns. + // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. // // Examples: // - config.yaml @@ -63,7 +63,7 @@ func (f Finder) Find(fsys afero.Fs) ([]string, error) { // pool.Go(func() ([]string, error) { // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, "*?[]\\^") { + // if strings.ContainsAny(searchName, globMatch) { // return globWalkSearch(fsys, searchPath, searchName, f.Type) // } // @@ -79,7 +79,7 @@ func (f Finder) Find(fsys afero.Fs) ([]string, error) { allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, "*?[]\\^") { + if strings.ContainsAny(item.name, globMatch) { return globWalkSearch(fsys, item.path, item.name, f.Type) } diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock index 46d28f805..df2a8ccec 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ b/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -1,22 +1,84 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { - "flake-compat": "flake-compat", + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": 
"pre-commit-hooks" + }, + "locked": { + "lastModified": 1725907707, + "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", + "owner": "cachix", + "repo": "devenv", + "rev": "2bbbbc468fc02257265a79652a8350651cca495a", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], "nix": "nix", "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] }, "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", "owner": "cachix", "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", "type": "github" }, "original": { "owner": "cachix", + "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -37,16 +99,32 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", + "lastModified": 1725234343, + "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", + "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", "type": "github" }, "original": { @@ -60,11 +138,29 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -82,11 +178,11 @@ ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,53 +191,90 @@ "type": "github" } }, - "lowdown-src": { - "flake": false, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, "locked": { - "lastModified": 1633514407, - "narHash": 
"sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", "type": "github" } }, - "nix": { + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { "inputs": { - "lowdown-src": "lowdown-src", + "flake-compat": [ + "devenv", + "flake-compat" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs-regression": "nixpkgs-regression_2" }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { "owner": "domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.21", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", "type": "github" }, "original": { @@ -153,23 +286,33 @@ }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", + "lastModified": 1725233747, + "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" }, "original": { - "dir": "lib", "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs-regression_2": { "locked": { "lastModified": 1643052045, "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", @@ -187,27 +330,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1710695816, + "narHash": 
"sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1694343207, - "narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=", + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1725910328, + "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "78058d810644f5ed276804ce7ea9e82d92bee293", + "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", "type": "github" }, "original": { @@ -217,13 +376,38 @@ "type": "github" } }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, "pre-commit-hooks": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "gitignore": "gitignore", "nixpkgs": [ "devenv", @@ -232,11 +416,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", "type": "github" }, "original": { @@ -249,7 +433,7 @@ "inputs": { "devenv": "devenv", "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" } }, "systems": { @@ -266,6 +450,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix index 209ecf286..312f1ec8c 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ b/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -20,6 +20,7 @@ default = { languages = { go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_23; }; packages = with pkgs; [ @@ -34,11 +35,27 @@ ci = devenv.shells.default; - ci_1_20 = { + ci_1_21 = { imports = [ devenv.shells.ci ]; languages = { - go.package = pkgs.go_1_20; + go.package = pkgs.go_1_21; + }; + }; + + ci_1_22 = { + imports = [ devenv.shells.ci ]; + + languages 
= { + go.package = pkgs.go_1_22; + }; + }; + + ci_1_23 = { + imports = [ devenv.shells.ci ]; + + languages = { + go.package = pkgs.go_1_23; }; }; }; diff --git a/vendor/github.com/sagikazarmark/locafero/glob.go b/vendor/github.com/sagikazarmark/locafero/glob.go new file mode 100644 index 000000000..00f833e99 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob.go @@ -0,0 +1,5 @@ +//go:build !windows + +package locafero + +const globMatch = "*?[]\\^" diff --git a/vendor/github.com/sagikazarmark/locafero/glob_windows.go b/vendor/github.com/sagikazarmark/locafero/glob_windows.go new file mode 100644 index 000000000..7aec2b247 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob_windows.go @@ -0,0 +1,8 @@ +//go:build windows + +package locafero + +// See [filepath.Match]: +// +// On Windows, escaping is disabled. Instead, '\\' is treated as path separator. +const globMatch = "*?[]^" diff --git a/vendor/github.com/sagikazarmark/slog-shim/.envrc b/vendor/github.com/sagikazarmark/slog-shim/.envrc deleted file mode 100644 index 3ce7171a3..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" -fi -use flake . --impure diff --git a/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/vendor/github.com/sagikazarmark/slog-shim/.gitignore deleted file mode 100644 index dc6d8b587..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/.devenv/ -/.direnv/ -/.task/ -/build/ diff --git a/vendor/github.com/sagikazarmark/slog-shim/README.md b/vendor/github.com/sagikazarmark/slog-shim/README.md deleted file mode 100644 index 1f5be85e1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# [slog](https://pkg.go.dev/log/slog) shim - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/slog-shim/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) - -Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library. -Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately, -especially since updating to Go 1.21 is a decision that most libraries won't make overnight. - -Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog). -While it's generally advised against using experimental packages in production, -this one served as a sort of backport package for the last few years, -incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`). - -This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. 
On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`. - -**How does it achieve backwards compatibility?** - -Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is. -(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.) - -This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `slog/log`. -Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly. - -For older Go versions, the library might become unstable after removing the shim. -However, since those older versions are no longer supported, the promise of backward compatibility remains intact. - -## Installation - -```shell -go get github.com/sagikazarmark/slog-shim -``` - -## Usage - -Import this package into your library and use it in your public API: - -```go -package mylib - -import slog "github.com/sagikazarmark/slog-shim" - -func New(logger *slog.Logger) MyLib { - // ... -} -``` - -When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21): - -```go -package main - -import "log/slog" - -// OR - -import "golang.org/x/exp/slog" - -mylib.New(slog.Default()) -``` - -**Make sure consumers are aware that your API behaves differently on different Go versions.** - -Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code: - -```diff -package mylib - -- import slog "github.com/sagikazarmark/slog-shim" -+ import "log/slog" - -func New(logger *slog.Logger) MyLib { - // ... -} -``` - -## License - -The project is licensed under a [BSD-style license](LICENSE). diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr.go b/vendor/github.com/sagikazarmark/slog-shim/attr.go deleted file mode 100644 index 89608bf3a..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// An Attr is a key-value pair. -type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. 
-func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. -func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go deleted file mode 100644 index b66481333..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// An Attr is a key-value pair. -type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. 
-func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/vendor/github.com/sagikazarmark/slog-shim/flake.lock deleted file mode 100644 index 7e8898e9e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.lock +++ /dev/null @@ -1,273 +0,0 @@ -{ - "nodes": { - "devenv": { - "inputs": { - "flake-compat": "flake-compat", - "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", - "owner": "cachix", - "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "lowdown-src": { - "flake": false, - "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", - "owner": "domenkozar", - "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "relaxed-flakes", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", - "type": "github" - }, - "original": { - "owner": "NixOS", 
- "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1694345580, - "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "master", - "repo": "nixpkgs", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/vendor/github.com/sagikazarmark/slog-shim/flake.nix deleted file mode 100644 index 7239bbc2e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.nix +++ /dev/null @@ -1,57 +0,0 @@ -{ - inputs = { - # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - nixpkgs.url = "github:NixOS/nixpkgs/master"; - flake-parts.url = "github:hercules-ci/flake-parts"; - devenv.url = "github:cachix/devenv"; - }; - - outputs = inputs@{ flake-parts, ... }: - flake-parts.lib.mkFlake { inherit inputs; } { - imports = [ - inputs.devenv.flakeModule - ]; - - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; - - perSystem = { config, self', inputs', pkgs, system, ... 
}: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_21; - }; - - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; - - ci_1_19 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_19; - }; - }; - - ci_1_20 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_20; - }; - }; - - ci_1_21 = { - imports = [ devenv.shells.ci ]; - - languages = { - go.package = pkgs.go_1_21; - }; - }; - }; - }; - }; -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler.go b/vendor/github.com/sagikazarmark/slog-shim/handler.go deleted file mode 100644 index f55556ae1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go deleted file mode 100644 index 670057573..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. 
-// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go deleted file mode 100644 index 7c22bd81e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go deleted file mode 100644 index 7b14f10ba..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/level.go b/vendor/github.com/sagikazarmark/slog-shim/level.go deleted file mode 100644 index 07288cf89..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. 
-type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/vendor/github.com/sagikazarmark/slog-shim/level_120.go deleted file mode 100644 index d3feb9420..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level_120.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. 
-// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger.go b/vendor/github.com/sagikazarmark/slog-shim/logger.go deleted file mode 100644 index e80036bec..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "context" - "log" - "log/slog" -) - -// Default returns the default Logger. -func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. 
-func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go deleted file mode 100644 index 97ebdd5e1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "context" - "log" - - "golang.org/x/exp/slog" -) - -// Default returns the default Logger. -func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. 
-func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/record.go b/vendor/github.com/sagikazarmark/slog-shim/record.go deleted file mode 100644 index 85ad1f784..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. -type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/vendor/github.com/sagikazarmark/slog-shim/record_120.go deleted file mode 100644 index c2eaf4e79..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record_120.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. 
-type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go deleted file mode 100644 index 45f6cfcba..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go deleted file mode 100644 index a69d63cce..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/value.go b/vendor/github.com/sagikazarmark/slog-shim/value.go deleted file mode 100644 index 61173eb94..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. 
-func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. -// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. -type LogValuer = slog.LogValuer diff --git a/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/vendor/github.com/sagikazarmark/slog-shim/value_120.go deleted file mode 100644 index 0f9f871ee..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value_120.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. 
-func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. -// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. -type LogValuer = slog.LogValuer diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore deleted file mode 100644 index 3c0af3825..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.vscode -.idea -*.swp -cmd/jv/jv diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md deleted file mode 100644 index b0d05054c..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# jsonschema v5.3.1 - -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) -[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5) -[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) -[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/master/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema) - -Package jsonschema provides json-schema compilation and validation. 
- -[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247) - -### Features: - - implements - [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), - [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), - [draft-7](https://json-schema.org/specification-links.html#draft-7), - [draft-6](https://json-schema.org/specification-links.html#draft-6), - [draft-4](https://json-schema.org/specification-links.html#draft-4) - - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) - - list of optional tests that are excluded can be found in schema_test.go(variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24)) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loop in schemas - - thread safe validation - - rich, intuitive hierarchial error messages with json-pointers to exact location - - supports output formats flag, basic and detailed - - supports enabling format and content Assertions in draft2019-09 or above - - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` - - compiled schema can be introspected. easier to develop tools like generating go structs given schema - - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) - - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) - - date-time, date, time, duration, period (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template(limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - base64 - - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - application/json - - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) - - -see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) - -The schema is compiled against the version specified in `$schema` property. -If "$schema" property is missing, it uses latest draft which currently implemented -by this library. - -You can force to use specific version, when `$schema` is missing, as follows: - -```go -compiler := jsonschema.NewCompiler() -compiler.Draft = jsonschema.Draft4 -``` - -This package supports loading json-schema from filePath and fileURL. - -To load json-schema from HTTPURL, add following import: - -```go -import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -``` - -## Rich Errors - -The ValidationError returned by Validate method contains detailed context to understand why and where the error is. 
- -schema.json: -```json -{ - "$ref": "t.json#/definitions/employee" -} -``` - -t.json: -```json -{ - "definitions": { - "employee": { - "type": "string" - } - } -} -``` - -doc.json: -```json -1 -``` - -assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`, -```go -fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy -``` -Prints: -``` -[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# - [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' - [I#] [S#/definitions/employee/type] expected string, but got number -``` - -Here `I` stands for instance document and `S` stands for schema document. -The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. -Nested causes are printed with indent. - -To output `err` in `flag` output format: -```go -b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false -} -``` -To output `err` in `basic` output format: -```go -b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "errors": [ - { - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" - }, - { - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" - }, - { - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] -} -``` -To output `err` in `detailed` output format: -```go -b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] - } - ] -} -``` - -## CLI - -to install `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` - -```bash -jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] []... - -assertcontent - enable content assertions with draft >= 2019 - -assertformat - enable format assertions with draft >= 2019 - -draft int - draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020) - -output string - output format. valid values flag, basic, detailed -``` - -if no `` arguments are passed, it simply validates the ``. -if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag - -exit-code is 1, if there are any validation errors - -`jv` can also validate yaml files. It also accepts schema from yaml files. 
- -## Validating YAML Documents - -since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. - -most yaml parser use `map[interface{}]interface{}` for object, -whereas json parser uses `map[string]interface{}`. - -so we need to manually convert them to `map[string]interface{}`. -below code shows such conversion by `toStringKeys` function. - -https://play.golang.org/p/Hhax3MrtD8r - -NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such conversion. since this library -returns `map[string]interface{}` if all keys are strings. \ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go deleted file mode 100644 index fdb68e648..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go +++ /dev/null @@ -1,812 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "math/big" - "regexp" - "strconv" - "strings" -) - -// A Compiler represents a json-schema compiler. -type Compiler struct { - // Draft represents the draft used when '$schema' attribute is missing. - // - // This defaults to latest supported draft (currently 2020-12). - Draft *Draft - resources map[string]*resource - - // Extensions is used to register extensions. - extensions map[string]extension - - // ExtractAnnotations tells whether schema annotations has to be extracted - // in compiled Schema or not. - ExtractAnnotations bool - - // LoadURL loads the document at given absolute URL. - // - // If nil, package global LoadURL is used. - LoadURL func(s string) (io.ReadCloser, error) - - // Formats can be registered by adding to this map. Key is format name, - // value is function that knows how to validate that format. - Formats map[string]func(interface{}) bool - - // AssertFormat for specifications >= draft2019-09. - AssertFormat bool - - // Decoders can be registered by adding to this map. Key is encoding name, - // value is function that knows how to decode string in that format. - Decoders map[string]func(string) ([]byte, error) - - // MediaTypes can be registered by adding to this map. Key is mediaType name, - // value is function that knows how to validate that mediaType. - MediaTypes map[string]func([]byte) error - - // AssertContent for specifications >= draft2019-09. - AssertContent bool -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// Returned error can be *SchemaError -func Compile(url string) (*Schema, error) { - return NewCompiler().Compile(url) -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func MustCompile(url string) *Schema { - return NewCompiler().MustCompile(url) -} - -// CompileString parses and compiles the given schema with given base url. -func CompileString(url, schema string) (*Schema, error) { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - return nil, err - } - return c.Compile(url) -} - -// MustCompileString is like CompileString but panics on error. -// It simplified safe initialization of global variables holding compiled Schema. 
-func MustCompileString(url, schema string) *Schema { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - return c.MustCompile(url) -} - -// NewCompiler returns a json-schema Compiler object. -// if '$schema' attribute is missing, it is treated as draft7. to change this -// behavior change Compiler.Draft value -func NewCompiler() *Compiler { - return &Compiler{ - Draft: latest, - resources: make(map[string]*resource), - Formats: make(map[string]func(interface{}) bool), - Decoders: make(map[string]func(string) ([]byte, error)), - MediaTypes: make(map[string]func([]byte) error), - extensions: make(map[string]extension), - } -} - -// AddResource adds in-memory resource to the compiler. -// -// Note that url must not have fragment -func (c *Compiler) AddResource(url string, r io.Reader) error { - res, err := newResource(url, r) - if err != nil { - return err - } - c.resources[res.url] = res - return nil -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func (c *Compiler) MustCompile(url string) *Schema { - s, err := c.Compile(url) - if err != nil { - panic(fmt.Sprintf("jsonschema: %#v", err)) - } - return s -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// error returned will be of type *SchemaError -func (c *Compiler) Compile(url string) (*Schema, error) { - // make url absolute - u, err := toAbs(url) - if err != nil { - return nil, &SchemaError{url, err} - } - url = u - - sch, err := c.compileURL(url, nil, "#") - if err != nil { - err = &SchemaError{url, err} - } - return sch, err -} - -func (c *Compiler) findResource(url string) (*resource, error) { - if _, ok := c.resources[url]; !ok { - // load resource - var rdr io.Reader - if sch, ok := vocabSchemas[url]; ok { - rdr = strings.NewReader(sch) - } else { - loadURL := LoadURL - if c.LoadURL != nil { - loadURL = c.LoadURL - } - r, err := loadURL(url) - if err != nil { - return nil, err - } - defer r.Close() - rdr = r - } - if err := c.AddResource(url, rdr); err != nil { - return nil, err - } - } - - r := c.resources[url] - if r.draft != nil { - return r, nil - } - - // set draft - r.draft = c.Draft - if m, ok := r.doc.(map[string]interface{}); ok { - if sch, ok := m["$schema"]; ok { - sch, ok := sch.(string) - if !ok { - return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) - } - if !isURI(sch) { - return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url) - } - r.draft = findDraft(sch) - if r.draft == nil { - sch, _ := split(sch) - if sch == url { - return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url) - } - mr, err := c.findResource(sch) - if err != nil { - return nil, err - } - r.draft = mr.draft - } - } - } - - id, err := r.draft.resolveID(r.url, r.doc) - if err != nil { - return nil, err - } - if id != "" { - r.url = id - } - - if err := r.fillSubschemas(c, r); err != nil { - return nil, err - } - - return r, nil -} - -func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { - // if url points to a draft, return Draft.meta - if d := findDraft(url); d != nil && d.meta != nil { - return d.meta, nil - } - - b, f := split(url) - r, err := c.findResource(b) - if err != nil { - return nil, err - } - return c.compileRef(r, stack, ptr, r, f) -} - -func (c *Compiler) compileRef(r *resource, stack 
[]schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { - base := r.baseURL(res.floc) - ref, err := resolveURL(base, ref) - if err != nil { - return nil, err - } - - u, f := split(ref) - sr := r.findResource(u) - if sr == nil { - // external resource - return c.compileURL(ref, stack, refPtr) - } - - // ensure root resource is always compiled first. - // this is required to get schema.meta from root resource - if r.schema == nil { - r.schema = newSchema(r.url, r.floc, r.draft, r.doc) - if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil { - return nil, err - } - } - - sr, err = r.resolveFragment(c, sr, f) - if err != nil { - return nil, err - } - if sr == nil { - return nil, fmt.Errorf("jsonschema: %s not found", ref) - } - - if sr.schema != nil { - if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { - return nil, err - } - return sr.schema, nil - } - - sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc) - return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) -} - -func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { - if r.draft.version < 2020 { - return nil - } - - rr := r.listResources(res) - rr = append(rr, res) - for _, sr := range rr { - if m, ok := sr.doc.(map[string]interface{}); ok { - if _, ok := m["$dynamicAnchor"]; ok { - sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) - if err != nil { - return err - } - res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) - } - } - } - return nil -} - -func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { - if err := c.compileDynamicAnchors(r, res); err != nil { - return nil, err - } - - switch v := res.doc.(type) { - case bool: - res.schema.Always = &v - return res.schema, nil - default: - return res.schema, c.compileMap(r, stack, sref, res) - } -} - -func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { - m := res.doc.(map[string]interface{}) - - if err := checkLoop(stack, sref); err != nil { - return err - } - stack = append(stack, sref) - - var s = res.schema - var err error - - if r == res { // root schema - if sch, ok := m["$schema"]; ok { - sch := sch.(string) - if d := findDraft(sch); d != nil { - s.meta = d.meta - } else { - if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil { - return err - } - } - } - } - - if ref, ok := m["$ref"]; ok { - s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) - if err != nil { - return err - } - if r.draft.version < 2019 { - // All other properties in a "$ref" object MUST be ignored - return nil - } - } - - if r.draft.version >= 2019 { - if r == res { // root schema - if vocab, ok := m["$vocabulary"]; ok { - for url, reqd := range vocab.(map[string]interface{}) { - if reqd, ok := reqd.(bool); ok && !reqd { - continue - } - if !r.draft.isVocab(url) { - return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res) - } - s.vocab = append(s.vocab, url) - } - } else { - s.vocab = r.draft.defaultVocab - } - } - - if ref, ok := m["$recursiveRef"]; ok { - s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) - if err != nil { - return err - } - } - } - if r.draft.version >= 2020 { - if dref, ok := m["$dynamicRef"]; ok { - s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) - if err != nil { - return err - } - if dref, ok := dref.(string); ok { - _, frag := split(dref) - if frag != "#" 
&& !strings.HasPrefix(frag, "#/") { - // frag is anchor - s.dynamicRefAnchor = frag[1:] - } - } - } - } - - loadInt := func(pname string) int { - if num, ok := m[pname]; ok { - i, _ := num.(json.Number).Float64() - return int(i) - } - return -1 - } - - loadRat := func(pname string) *big.Rat { - if num, ok := m[pname]; ok { - r, _ := new(big.Rat).SetString(string(num.(json.Number))) - return r - } - return nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") { - if t, ok := m["type"]; ok { - switch t := t.(type) { - case string: - s.Types = []string{t} - case []interface{}: - s.Types = toStrings(t) - } - } - - if e, ok := m["enum"]; ok { - s.Enum = e.([]interface{}) - allPrimitives := true - for _, item := range s.Enum { - switch jsonType(item) { - case "object", "array": - allPrimitives = false - break - } - } - s.enumError = "enum failed" - if allPrimitives { - if len(s.Enum) == 1 { - s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) - } else { - strEnum := make([]string, len(s.Enum)) - for i, item := range s.Enum { - strEnum[i] = fmt.Sprintf("%#v", item) - } - s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) - } - } - } - - s.Minimum = loadRat("minimum") - if exclusive, ok := m["exclusiveMinimum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Minimum, s.ExclusiveMinimum = nil, s.Minimum - } - } else { - s.ExclusiveMinimum = loadRat("exclusiveMinimum") - } - } - - s.Maximum = loadRat("maximum") - if exclusive, ok := m["exclusiveMaximum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Maximum, s.ExclusiveMaximum = nil, s.Maximum - } - } else { - s.ExclusiveMaximum = loadRat("exclusiveMaximum") - } - } - - s.MultipleOf = loadRat("multipleOf") - - s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") - - if req, ok := m["required"]; ok { - s.Required = toStrings(req.([]interface{})) - } - - s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") - - if unique, ok := m["uniqueItems"]; ok { - s.UniqueItems = unique.(bool) - } - - s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") - - if pattern, ok := m["pattern"]; ok { - s.Pattern = regexp.MustCompile(pattern.(string)) - } - - if r.draft.version >= 2019 { - s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") - if s.MinContains == -1 { - s.MinContains = 1 - } - - if deps, ok := m["dependentRequired"]; ok { - deps := deps.(map[string]interface{}) - s.DependentRequired = make(map[string][]string, len(deps)) - for pname, pvalue := range deps { - s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) - } - } - } - } - - compile := func(stack []schemaRef, ptr string) (*Schema, error) { - return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) - } - - loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { - if _, ok := m[pname]; ok { - return compile(stack, escape(pname)) - } - return nil, nil - } - - loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { - if pvalue, ok := m[pname]; ok { - pvalue := pvalue.([]interface{}) - schemas := make([]*Schema, len(pvalue)) - for i := range pvalue { - sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) - if err != nil { - return nil, err - } - schemas[i] = sch - } - return schemas, nil - } - return nil, nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") { - if s.Not, err = loadSchema("not", stack); err != nil { - return 
err - } - if s.AllOf, err = loadSchemas("allOf", stack); err != nil { - return err - } - if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { - return err - } - if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { - return err - } - - if props, ok := m["properties"]; ok { - props := props.(map[string]interface{}) - s.Properties = make(map[string]*Schema, len(props)) - for pname := range props { - s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) - if err != nil { - return err - } - } - } - - if regexProps, ok := m["regexProperties"]; ok { - s.RegexProperties = regexProps.(bool) - } - - if patternProps, ok := m["patternProperties"]; ok { - patternProps := patternProps.(map[string]interface{}) - s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) - for pattern := range patternProps { - s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) - if err != nil { - return err - } - } - } - - if additionalProps, ok := m["additionalProperties"]; ok { - switch additionalProps := additionalProps.(type) { - case bool: - s.AdditionalProperties = additionalProps - case map[string]interface{}: - s.AdditionalProperties, err = compile(nil, "additionalProperties") - if err != nil { - return err - } - } - } - - if deps, ok := m["dependencies"]; ok { - deps := deps.(map[string]interface{}) - s.Dependencies = make(map[string]interface{}, len(deps)) - for pname, pvalue := range deps { - switch pvalue := pvalue.(type) { - case []interface{}: - s.Dependencies[pname] = toStrings(pvalue) - default: - s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 6 { - if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { - return err - } - if s.Contains, err = loadSchema("contains", nil); err != nil { - return err - } - } - - if r.draft.version >= 7 { - if m["if"] != nil { - if s.If, err = loadSchema("if", stack); err != nil { - return err - } - if s.Then, err = loadSchema("then", stack); err != nil { - return err - } - if s.Else, err = loadSchema("else", stack); err != nil { - return err - } - } - } - if r.draft.version >= 2019 { - if deps, ok := m["dependentSchemas"]; ok { - deps := deps.(map[string]interface{}) - s.DependentSchemas = make(map[string]*Schema, len(deps)) - for pname := range deps { - s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 2020 { - if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { - return err - } - if s.Items2020, err = loadSchema("items", nil); err != nil { - return err - } - } else { - if items, ok := m["items"]; ok { - switch items.(type) { - case []interface{}: - s.Items, err = loadSchemas("items", nil) - if err != nil { - return err - } - if additionalItems, ok := m["additionalItems"]; ok { - switch additionalItems := additionalItems.(type) { - case bool: - s.AdditionalItems = additionalItems - case map[string]interface{}: - s.AdditionalItems, err = compile(nil, "additionalItems") - if err != nil { - return err - } - } - } - default: - s.Items, err = compile(nil, "items") - if err != nil { - return err - } - } - } - } - - } - - // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020 - if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && 
r.schema.meta.hasVocab("unevaluated")) { - if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { - return err - } - if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { - return err - } - if r.draft.version >= 2020 { - // any item in an array that passes validation of the contains schema is considered "evaluated" - s.ContainsEval = true - } - } - - if format, ok := m["format"]; ok { - s.Format = format.(string) - if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") { - if format, ok := c.Formats[s.Format]; ok { - s.format = format - } else { - s.format, _ = Formats[s.Format] - } - } - } - - if c.ExtractAnnotations { - if title, ok := m["title"]; ok { - s.Title = title.(string) - } - if description, ok := m["description"]; ok { - s.Description = description.(string) - } - s.Default = m["default"] - } - - if r.draft.version >= 6 { - if c, ok := m["const"]; ok { - s.Constant = []interface{}{c} - } - } - - if r.draft.version >= 7 { - if encoding, ok := m["contentEncoding"]; ok { - s.ContentEncoding = encoding.(string) - if decoder, ok := c.Decoders[s.ContentEncoding]; ok { - s.decoder = decoder - } else { - s.decoder, _ = Decoders[s.ContentEncoding] - } - } - if mediaType, ok := m["contentMediaType"]; ok { - s.ContentMediaType = mediaType.(string) - if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok { - s.mediaType = mediaType - } else { - s.mediaType, _ = MediaTypes[s.ContentMediaType] - } - if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil { - return err - } - } - if c.ExtractAnnotations { - if comment, ok := m["$comment"]; ok { - s.Comment = comment.(string) - } - if readOnly, ok := m["readOnly"]; ok { - s.ReadOnly = readOnly.(bool) - } - if writeOnly, ok := m["writeOnly"]; ok { - s.WriteOnly = writeOnly.(bool) - } - if examples, ok := m["examples"]; ok { - s.Examples = examples.([]interface{}) - } - } - } - - if r.draft.version >= 2019 { - if !c.AssertContent { - s.decoder = nil - s.mediaType = nil - s.ContentSchema = nil - } - if c.ExtractAnnotations { - if deprecated, ok := m["deprecated"]; ok { - s.Deprecated = deprecated.(bool) - } - } - } - - for name, ext := range c.extensions { - es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) - if err != nil { - return err - } - if es != nil { - if s.Extensions == nil { - s.Extensions = make(map[string]ExtSchema) - } - s.Extensions[name] = es - } - } - - return nil -} - -func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { - validate := func(meta *Schema) error { - if meta == nil { - return nil - } - return meta.validateValue(v, vloc) - } - - if err := validate(r.draft.meta); err != nil { - return err - } - for _, ext := range c.extensions { - if err := validate(ext.meta); err != nil { - return err - } - } - return nil -} - -func toStrings(arr []interface{}) []string { - s := make([]string, len(arr)) - for i, v := range arr { - s[i] = v.(string) - } - return s -} - -// SchemaRef captures schema and the path referring to it. 
-type schemaRef struct { - path string // relative-json-pointer to schema - schema *Schema // target schema - discard bool // true when scope left -} - -func (sr schemaRef) String() string { - return fmt.Sprintf("(%s)%v", sr.path, sr.schema) -} - -func checkLoop(stack []schemaRef, sref schemaRef) error { - for _, ref := range stack { - if ref.schema == sref.schema { - return infiniteLoopError(stack, sref) - } - } - return nil -} - -func keywordLocation(stack []schemaRef, path string) string { - var loc string - for _, ref := range stack[1:] { - loc += "/" + ref.path - } - if path != "" { - loc = loc + "/" + path - } - return loc -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go deleted file mode 100644 index 7570b8b5a..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go +++ /dev/null @@ -1,29 +0,0 @@ -package jsonschema - -import ( - "encoding/base64" - "encoding/json" -) - -// Decoders is a registry of functions, which know how to decode -// string encoded in specific format. -// -// New Decoders can be registered by adding to this map. Key is encoding name, -// value is function that knows how to decode string in that format. -var Decoders = map[string]func(string) ([]byte, error){ - "base64": base64.StdEncoding.DecodeString, -} - -// MediaTypes is a registry of functions, which know how to validate -// whether the bytes represent data of that mediaType. -// -// New mediaTypes can be registered by adding to this map. Key is mediaType name, -// value is function that knows how to validate that mediaType. -var MediaTypes = map[string]func([]byte) error{ - "application/json": validateJSON, -} - -func validateJSON(b []byte) error { - var v interface{} - return json.Unmarshal(b, &v) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go deleted file mode 100644 index a124262a5..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package jsonschema provides json-schema compilation and validation. - -Features: - - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 - - fully compliant with JSON-Schema-Test-Suite, (excluding some optional) - - list of optional tests that are excluded can be found in schema_test.go(variable skipTests) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loop in schemas - - thread safe validation - - rich, intuitive hierarchial error messages with json-pointers to exact location - - supports output formats flag, basic and detailed - - supports enabling format and content Assertions in draft2019-09 or above - - change Compiler.AssertFormat, Compiler.AssertContent to true - - compiled schema can be introspected. 
easier to develop tools like generating go structs given schema - - supports user-defined keywords via extensions - - implements following formats (supports user-defined) - - date-time, date, time, duration (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template(limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements following contentEncoding (supports user-defined) - - base64 - - implements following contentMediaType (supports user-defined) - - application/json - - can load from files/http/https/string/[]byte/io.Reader (supports user-defined) - -The schema is compiled against the version specified in "$schema" property. -If "$schema" property is missing, it uses latest draft which currently implemented -by this library. - -You can force to use specific draft, when "$schema" is missing, as follows: - - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft4 - -This package supports loading json-schema from filePath and fileURL. - -To load json-schema from HTTPURL, add following import: - - import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" - -you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA -*/ -package jsonschema diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go deleted file mode 100644 index 154fa5837..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go +++ /dev/null @@ -1,1454 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strconv" - "strings" -) - -// A Draft represents json-schema draft -type Draft struct { - version int - meta *Schema - id string // property name used to represent schema id. - boolSchema bool // is boolean valid schema - vocab []string // built-in vocab - defaultVocab []string // vocabs when $vocabulary is not used - subschemas map[string]position -} - -func (d *Draft) URL() string { - switch d.version { - case 2020: - return "https://json-schema.org/draft/2020-12/schema" - case 2019: - return "https://json-schema.org/draft/2019-09/schema" - case 7: - return "https://json-schema.org/draft-07/schema" - case 6: - return "https://json-schema.org/draft-06/schema" - case 4: - return "https://json-schema.org/draft-04/schema" - } - return "" -} - -func (d *Draft) String() string { - return fmt.Sprintf("Draft%d", d.version) -} - -func (d *Draft) loadMeta(url, schema string) { - c := NewCompiler() - c.AssertFormat = true - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - d.meta = c.MustCompile(url) - d.meta.meta = d.meta -} - -func (d *Draft) getID(sch interface{}) string { - m, ok := sch.(map[string]interface{}) - if !ok { - return "" - } - if _, ok := m["$ref"]; ok && d.version <= 7 { - // $ref prevents a sibling id from changing the base uri - return "" - } - v, ok := m[d.id] - if !ok { - return "" - } - id, ok := v.(string) - if !ok { - return "" - } - return id -} - -func (d *Draft) resolveID(base string, sch interface{}) (string, error) { - id, _ := split(d.getID(sch)) // strip fragment - if id == "" { - return "", nil - } - url, err := resolveURL(base, id) - url, _ = split(url) // strip fragment - return url, err -} - -func (d *Draft) anchors(sch interface{}) []string { - m, ok := sch.(map[string]interface{}) - if !ok { - return nil - } - - var anchors []string - - // before draft2019, anchor is specified in id - _, f := split(d.getID(m)) - if f != "#" { - anchors = append(anchors, 
f[1:]) - } - - if v, ok := m["$anchor"]; ok && d.version >= 2019 { - anchors = append(anchors, v.(string)) - } - if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { - anchors = append(anchors, v.(string)) - } - return anchors -} - -// listSubschemas collects subschemas in r into rr. -func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { - add := func(loc string, sch interface{}) error { - url, err := d.resolveID(base, sch) - if err != nil { - return err - } - floc := r.floc + "/" + loc - sr := &resource{url: url, floc: floc, doc: sch} - rr[floc] = sr - - base := base - if url != "" { - base = url - } - return d.listSubschemas(sr, base, rr) - } - - sch, ok := r.doc.(map[string]interface{}) - if !ok { - return nil - } - for kw, pos := range d.subschemas { - v, ok := sch[kw] - if !ok { - continue - } - if pos&self != 0 { - switch v := v.(type) { - case map[string]interface{}: - if err := add(kw, v); err != nil { - return err - } - case bool: - if d.boolSchema { - if err := add(kw, v); err != nil { - return err - } - } - } - } - if pos&item != 0 { - if v, ok := v.([]interface{}); ok { - for i, item := range v { - if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { - return err - } - } - } - } - if pos&prop != 0 { - if v, ok := v.(map[string]interface{}); ok { - for pname, pval := range v { - if err := add(kw+"/"+escape(pname), pval); err != nil { - return err - } - } - } - } - } - return nil -} - -// isVocab tells whether url is built-in vocab. -func (d *Draft) isVocab(url string) bool { - for _, v := range d.vocab { - if url == v { - return true - } - } - return false -} - -type position uint - -const ( - self position = 1 << iota - prop - item -) - -// supported drafts -var ( - Draft4 = &Draft{version: 4, id: "id", boolSchema: false} - Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} - Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} - Draft2019 = &Draft{ - version: 2019, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - "https://json-schema.org/draft/2019-09/vocab/meta-data", - "https://json-schema.org/draft/2019-09/vocab/format", - "https://json-schema.org/draft/2019-09/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - }, - } - Draft2020 = &Draft{ - version: 2020, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - "https://json-schema.org/draft/2020-12/vocab/meta-data", - "https://json-schema.org/draft/2020-12/vocab/format-annotation", - "https://json-schema.org/draft/2020-12/vocab/format-assertion", - "https://json-schema.org/draft/2020-12/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - }, - } - - latest = Draft2020 -) - -func findDraft(url string) *Draft { - if strings.HasPrefix(url, "http://") { - url = "https://" 
+ strings.TrimPrefix(url, "http://") - } - if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { - url = url[:strings.IndexByte(url, '#')] - } - switch url { - case "https://json-schema.org/schema": - return latest - case "https://json-schema.org/draft/2020-12/schema": - return Draft2020 - case "https://json-schema.org/draft/2019-09/schema": - return Draft2019 - case "https://json-schema.org/draft-07/schema": - return Draft7 - case "https://json-schema.org/draft-06/schema": - return Draft6 - case "https://json-schema.org/draft-04/schema": - return Draft4 - } - return nil -} - -func init() { - subschemas := map[string]position{ - // type agnostic - "definitions": prop, - "not": self, - "allOf": item, - "anyOf": item, - "oneOf": item, - // object - "properties": prop, - "additionalProperties": self, - "patternProperties": prop, - // array - "items": self | item, - "additionalItems": self, - "dependencies": prop, - } - Draft4.subschemas = clone(subschemas) - - subschemas["propertyNames"] = self - subschemas["contains"] = self - Draft6.subschemas = clone(subschemas) - - subschemas["if"] = self - subschemas["then"] = self - subschemas["else"] = self - Draft7.subschemas = clone(subschemas) - - subschemas["$defs"] = prop - subschemas["dependentSchemas"] = prop - subschemas["unevaluatedProperties"] = self - subschemas["unevaluatedItems"] = self - subschemas["contentSchema"] = self - Draft2019.subschemas = clone(subschemas) - - subschemas["prefixItems"] = item - Draft2020.subschemas = clone(subschemas) - - Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{ - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uriref" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - 
"additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "regexProperties": { "type": "boolean" }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" }, - "format": { "type": "string" }, - "$ref": { "type": "string" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} - }`) - Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{ - "$schema": "http://json-schema.org/draft-06/schema#", - "$id": "http://json-schema.org/draft-06/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - 
"type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": {}, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": {} - }`) - Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://json-schema.org/draft-07/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$comment": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": true - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - 
"additionalProperties": { "$ref": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": true, - "enum": { - "type": "array", - "items": true, - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "if": { "$ref": "#" }, - "then": { "$ref": "#" }, - "else": { "$ref": "#" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": true - }`) - Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/schema", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true, - "https://json-schema.org/draft/2019-09/vocab/applicator": true, - "https://json-schema.org/draft/2019-09/vocab/validation": true, - "https://json-schema.org/draft/2019-09/vocab/meta-data": true, - "https://json-schema.org/draft/2019-09/vocab/format": false, - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": "meta/format"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "properties": { - "definitions": { - "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - } - } - } - }`) - Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/schema", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true, - "https://json-schema.org/draft/2020-12/vocab/applicator": true, - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, - "https://json-schema.org/draft/2020-12/vocab/validation": true, - "https://json-schema.org/draft/2020-12/vocab/meta-data": true, - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/unevaluated"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": 
"meta/format-annotation"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", - "properties": { - "definitions": { - "$comment": "\"definitions\" has been replaced by \"$defs\".", - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "deprecated": true, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$dynamicRef": "#meta" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - }, - "deprecated": true, - "default": {} - }, - "$recursiveAnchor": { - "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", - "$ref": "meta/core#/$defs/anchorString", - "deprecated": true - }, - "$recursiveRef": { - "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", - "$ref": "meta/core#/$defs/uriReferenceString", - "deprecated": true - } - } - }`) -} - -var vocabSchemas = map[string]string{ - "https://json-schema.org/draft/2019-09/meta/core": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true - }, - "$recursiveAnchor": true, - - "title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$anchor": { - "type": "string", - "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveRef": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveAnchor": { - "type": "boolean", - "default": false - }, - "$vocabulary": { - "type": "object", - "propertyNames": { - "type": "string", - "format": "uri" - }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/applicator": true - }, - "$recursiveAnchor": true, - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "additionalItems": { "$recursiveRef": "#" }, - "unevaluatedItems": { "$recursiveRef": "#" }, - "items": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "#/$defs/schemaArray" } - ] - }, - "contains": { "$recursiveRef": "#" }, - "additionalProperties": { "$recursiveRef": "#" }, - "unevaluatedProperties": { "$recursiveRef": "#" }, - "properties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { - "$recursiveRef": "#" - } - }, - 
"propertyNames": { "$recursiveRef": "#" }, - "if": { "$recursiveRef": "#" }, - "then": { "$recursiveRef": "#" }, - "else": { "$recursiveRef": "#" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$recursiveRef": "#" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$recursiveRef": "#" } - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/validation": true - }, - "$recursiveAnchor": true, - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/meta-data": true - }, - "$recursiveAnchor": true, - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/format": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/format", - "$vocabulary": { - 
"https://json-schema.org/draft/2019-09/vocab/format": true - }, - "$recursiveAnchor": true, - - "title": "Format vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2019-09/meta/content": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "contentSchema": { "$recursiveRef": "#" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/core": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true - }, - "$dynamicAnchor": "meta", - - "title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "$ref": "#/$defs/uriReferenceString", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { "$ref": "#/$defs/uriString" }, - "$ref": { "$ref": "#/$defs/uriReferenceString" }, - "$anchor": { "$ref": "#/$defs/anchorString" }, - "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, - "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, - "$vocabulary": { - "type": "object", - "propertyNames": { "$ref": "#/$defs/uriString" }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" } - } - }, - "$defs": { - "anchorString": { - "type": "string", - "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" - }, - "uriString": { - "type": "string", - "format": "uri" - }, - "uriReferenceString": { - "type": "string", - "format": "uri-reference" - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/applicator": true - }, - "$dynamicAnchor": "meta", - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "prefixItems": { "$ref": "#/$defs/schemaArray" }, - "items": { "$dynamicRef": "#meta" }, - "contains": { "$dynamicRef": "#meta" }, - "additionalProperties": { "$dynamicRef": "#meta" }, - "properties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "propertyNames": { "$dynamicRef": "#meta" }, - "if": { "$dynamicRef": "#meta" }, - "then": { "$dynamicRef": "#meta" }, - "else": { "$dynamicRef": "#meta" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$dynamicRef": "#meta" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$dynamicRef": "#meta" } - } - } - }`, - 
"https://json-schema.org/draft/2020-12/meta/unevaluated": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true - }, - "$dynamicAnchor": "meta", - - "title": "Unevaluated applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "unevaluatedItems": { "$dynamicRef": "#meta" }, - "unevaluatedProperties": { "$dynamicRef": "#meta" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/validation": true - }, - "$dynamicAnchor": "meta", - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/meta-data": true - }, - "$dynamicAnchor": "meta", - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-annotation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": 
"https://json-schema.org/draft/2020-12/meta/format-annotation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for annotation results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-assertion": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-assertion": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for assertion results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/content": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentEncoding": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentSchema": { "$dynamicRef": "#meta" } - } - }`, -} - -func clone(m map[string]position) map[string]position { - mm := make(map[string]position) - for k, v := range m { - mm[k] = v - } - return mm -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go deleted file mode 100644 index deaded89f..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strings" -) - -// InvalidJSONTypeError is the error type returned by ValidateInterface. -// this tells that specified go object is not valid jsonType. -type InvalidJSONTypeError string - -func (e InvalidJSONTypeError) Error() string { - return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) -} - -// InfiniteLoopError is returned by Compile/Validate. -// this gives url#keywordLocation that lead to infinity loop. -type InfiniteLoopError string - -func (e InfiniteLoopError) Error() string { - return "jsonschema: infinite loop " + string(e) -} - -func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { - var path string - for _, ref := range stack { - if path == "" { - path += ref.schema.Location - } else { - path += "/" + ref.path - } - } - return InfiniteLoopError(path + "/" + sref.path) -} - -// SchemaError is the error type returned by Compile. -type SchemaError struct { - // SchemaURL is the url to json-schema that filed to compile. - // This is helpful, if your schema refers to external schemas - SchemaURL string - - // Err is the error that occurred during compilation. 
- // It could be ValidationError, because compilation validates - // given schema against the json meta-schema - Err error -} - -func (se *SchemaError) Unwrap() error { - return se.Err -} - -func (se *SchemaError) Error() string { - s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL) - if se.Err != nil { - return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: ")) - } - return s -} - -func (se *SchemaError) GoString() string { - if _, ok := se.Err.(*ValidationError); ok { - return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err) - } - return se.Error() -} - -// ValidationError is the error type returned by Validate. -type ValidationError struct { - KeywordLocation string // validation path of validating keyword or schema - AbsoluteKeywordLocation string // absolute location of validating keyword or schema - InstanceLocation string // location of the json value within the instance being validated - Message string // describes error - Causes []*ValidationError // nested validation errors -} - -func (ve *ValidationError) add(causes ...error) error { - for _, cause := range causes { - ve.Causes = append(ve.Causes, cause.(*ValidationError)) - } - return ve -} - -func (ve *ValidationError) causes(err error) error { - if err := err.(*ValidationError); err.Message == "" { - ve.Causes = err.Causes - } else { - ve.add(err) - } - return ve -} - -func (ve *ValidationError) Error() string { - leaf := ve - for len(leaf.Causes) > 0 { - leaf = leaf.Causes[0] - } - u, _ := split(ve.AbsoluteKeywordLocation) - return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message) -} - -func (ve *ValidationError) GoString() string { - sloc := ve.AbsoluteKeywordLocation - sloc = sloc[strings.IndexByte(sloc, '#')+1:] - msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message) - for _, c := range ve.Causes { - for _, line := range strings.Split(c.GoString(), "\n") { - msg += "\n " + line - } - } - return msg -} - -func joinPtr(ptr1, ptr2 string) string { - if len(ptr1) == 0 { - return ptr2 - } - if len(ptr2) == 0 { - return ptr1 - } - return ptr1 + "/" + ptr2 -} - -// quote returns single-quoted string -func quote(s string) string { - s = fmt.Sprintf("%q", s) - s = strings.ReplaceAll(s, `\"`, `"`) - s = strings.ReplaceAll(s, `'`, `\'`) - return "'" + s[1:len(s)-1] + "'" -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go deleted file mode 100644 index 452ba118c..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go +++ /dev/null @@ -1,116 +0,0 @@ -package jsonschema - -// ExtCompiler compiles custom keyword(s) into ExtSchema. -type ExtCompiler interface { - // Compile compiles the custom keywords in schema m and returns its compiled representation. - // if the schema m does not contain the keywords defined by this extension, - // compiled representation nil should be returned. - Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error) -} - -// ExtSchema is schema representation of custom keyword(s) -type ExtSchema interface { - // Validate validates the json value v with this ExtSchema. - // Returned error must be *ValidationError. - Validate(ctx ValidationContext, v interface{}) error -} - -type extension struct { - meta *Schema - compiler ExtCompiler -} - -// RegisterExtension registers custom keyword(s) into this compiler. 
-// -// name is extension name, used only to avoid name collisions. -// meta captures the metaschema for the new keywords. -// This is used to validate the schema before calling ext.Compile. -func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) { - c.extensions[name] = extension{meta, ext} -} - -// CompilerContext --- - -// CompilerContext provides additional context required in compiling for extension. -type CompilerContext struct { - c *Compiler - r *resource - stack []schemaRef - res *resource -} - -// Compile compiles given value at ptr into *Schema. This is useful in implementing -// keyword like allOf/not/patternProperties. -// -// schPath is the relative-json-pointer to the schema to be compiled from parent schema. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath) -} - -// CompileRef compiles the schema referenced by ref uri -// -// refPath is the relative-json-pointer to ref. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref) -} - -// ValidationContext --- - -// ValidationContext provides additional context required in validating for extension. -type ValidationContext struct { - result validationResult - validate func(sch *Schema, schPath string, v interface{}, vpath string) error - validateInplace func(sch *Schema, schPath string) error - validationError func(keywordPath string, format string, a ...interface{}) *ValidationError -} - -// EvaluatedProp marks given property of object as evaluated. -func (ctx ValidationContext) EvaluatedProp(prop string) { - delete(ctx.result.unevalProps, prop) -} - -// EvaluatedItem marks given index of array as evaluated. -func (ctx ValidationContext) EvaluatedItem(index int) { - delete(ctx.result.unevalItems, index) -} - -// Validate validates schema s with value v. Extension must use this method instead of -// *Schema.ValidateInterface method. This will be useful in implementing keywords like -// allOf/oneOf -// -// spath is relative-json-pointer to s -// vpath is relative-json-pointer to v. -func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error { - if vpath == "" { - return ctx.validateInplace(s, spath) - } - return ctx.validate(s, spath, v, vpath) -} - -// Error used to construct validation error by extensions. -// -// keywordPath is relative-json-pointer to keyword. -func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError { - return ctx.validationError(keywordPath, format, a...) -} - -// Group is used by extensions to group multiple errors as causes to parent error. -// This is useful in implementing keywords like allOf where each schema specified -// in allOf can result a validationError. 
-func (ValidationError) Group(parent *ValidationError, causes ...error) error { - return parent.add(causes...) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go deleted file mode 100644 index 05686073f..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go +++ /dev/null @@ -1,567 +0,0 @@ -package jsonschema - -import ( - "errors" - "net" - "net/mail" - "net/url" - "regexp" - "strconv" - "strings" - "time" -) - -// Formats is a registry of functions, which know how to validate -// a specific format. -// -// New Formats can be registered by adding to this map. Key is format name, -// value is function that knows how to validate that format. -var Formats = map[string]func(interface{}) bool{ - "date-time": isDateTime, - "date": isDate, - "time": isTime, - "duration": isDuration, - "period": isPeriod, - "hostname": isHostname, - "email": isEmail, - "ip-address": isIPV4, - "ipv4": isIPV4, - "ipv6": isIPV6, - "uri": isURI, - "iri": isURI, - "uri-reference": isURIReference, - "uriref": isURIReference, - "iri-reference": isURIReference, - "uri-template": isURITemplate, - "regex": isRegex, - "json-pointer": isJSONPointer, - "relative-json-pointer": isRelativeJSONPointer, - "uuid": isUUID, -} - -// isDateTime tells whether given string is a valid date representation -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDateTime(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ - return false - } - if s[10] != 'T' && s[10] != 't' { - return false - } - return isDate(s[:10]) && isTime(s[11:]) -} - -// isDate tells whether given string is a valid full-date production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := time.Parse("2006-01-02", s) - return err == nil -} - -// isTime tells whether given string is a valid full-time production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isTime(v interface{}) bool { - str, ok := v.(string) - if !ok { - return true - } - - // golang time package does not support leap seconds. - // so we are parsing it manually here. - - // hh:mm:ss - // 01234567 - if len(str) < 9 || str[2] != ':' || str[5] != ':' { - return false - } - isInRange := func(str string, min, max int) (int, bool) { - n, err := strconv.Atoi(str) - if err != nil { - return 0, false - } - if n < min || n > max { - return 0, false - } - return n, true - } - var h, m, s int - if h, ok = isInRange(str[0:2], 0, 23); !ok { - return false - } - if m, ok = isInRange(str[3:5], 0, 59); !ok { - return false - } - if s, ok = isInRange(str[6:8], 0, 60); !ok { - return false - } - str = str[8:] - - // parse secfrac if present - if str[0] == '.' 
{ - // dot following more than one digit - str = str[1:] - var numDigits int - for str != "" { - if str[0] < '0' || str[0] > '9' { - break - } - numDigits++ - str = str[1:] - } - if numDigits == 0 { - return false - } - } - - if len(str) == 0 { - return false - } - - if str[0] == 'z' || str[0] == 'Z' { - if len(str) != 1 { - return false - } - } else { - // time-numoffset - // +hh:mm - // 012345 - if len(str) != 6 || str[3] != ':' { - return false - } - - var sign int - if str[0] == '+' { - sign = -1 - } else if str[0] == '-' { - sign = +1 - } else { - return false - } - - var zh, zm int - if zh, ok = isInRange(str[1:3], 0, 23); !ok { - return false - } - if zm, ok = isInRange(str[4:6], 0, 59); !ok { - return false - } - - // apply timezone offset - hm := (h*60 + m) + sign*(zh*60+zm) - if hm < 0 { - hm += 24 * 60 - } - h, m = hm/60, hm%60 - } - - // check leapsecond - if s == 60 { // leap second - if h != 23 || m != 59 { - return false - } - } - - return true -} - -// isDuration tells whether given string is a valid duration format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isDuration(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) == 0 || s[0] != 'P' { - return false - } - s = s[1:] - parseUnits := func() (units string, ok bool) { - for len(s) > 0 && s[0] != 'T' { - digits := false - for { - if len(s) == 0 { - break - } - if s[0] < '0' || s[0] > '9' { - break - } - digits = true - s = s[1:] - } - if !digits || len(s) == 0 { - return units, false - } - units += s[:1] - s = s[1:] - } - return units, true - } - units, ok := parseUnits() - if !ok { - return false - } - if units == "W" { - return len(s) == 0 // P_W - } - if len(units) > 0 { - if strings.Index("YMD", units) == -1 { - return false - } - if len(s) == 0 { - return true // "P" dur-date - } - } - if len(s) == 0 || s[0] != 'T' { - return false - } - s = s[1:] - units, ok = parseUnits() - return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1 -} - -// isPeriod tells whether given string is a valid period format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isPeriod(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - slash := strings.IndexByte(s, '/') - if slash == -1 { - return false - } - start, end := s[:slash], s[slash+1:] - if isDateTime(start) { - return isDateTime(end) || isDuration(end) - } - return isDuration(start) && isDateTime(end) -} - -// isHostname tells whether given string is a valid representation -// for an Internet host name, as defined by RFC 1034 section 3.1 and -// RFC 1123 section 2.1. -// -// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
-func isHostname(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters - s = strings.TrimSuffix(s, ".") - if len(s) > 253 { - return false - } - - // Hostnames are composed of series of labels concatenated with dots, as are all domain names - for _, label := range strings.Split(s, ".") { - // Each label must be from 1 to 63 characters long - if labelLen := len(label); labelLen < 1 || labelLen > 63 { - return false - } - - // labels must not start with a hyphen - // RFC 1123 section 2.1: restriction on the first character - // is relaxed to allow either a letter or a digit - if first := s[0]; first == '-' { - return false - } - - // must not end with a hyphen - if label[len(label)-1] == '-' { - return false - } - - // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), - // the digits '0' through '9', and the hyphen ('-') - for _, c := range label { - if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid { - return false - } - } - } - - return true -} - -// isEmail tells whether given string is a valid Internet email address -// as defined by RFC 5322, section 3.4.1. -// -// See https://en.wikipedia.org/wiki/Email_address, for details. -func isEmail(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire email address to be no more than 254 characters long - if len(s) > 254 { - return false - } - - // email address is generally recognized as having two parts joined with an at-sign - at := strings.LastIndexByte(s, '@') - if at == -1 { - return false - } - local := s[0:at] - domain := s[at+1:] - - // local part may be up to 64 characters long - if len(local) > 64 { - return false - } - - // domain if enclosed in brackets, must match an IP address - if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' { - ip := domain[1 : len(domain)-1] - if strings.HasPrefix(ip, "IPv6:") { - return isIPV6(strings.TrimPrefix(ip, "IPv6:")) - } - return isIPV4(ip) - } - - // domain must match the requirements for a hostname - if !isHostname(domain) { - return false - } - - _, err := mail.ParseAddress(s) - return err == nil -} - -// isIPV4 tells whether given string is a valid representation of an IPv4 address -// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2. -func isIPV4(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - groups := strings.Split(s, ".") - if len(groups) != 4 { - return false - } - for _, group := range groups { - n, err := strconv.Atoi(group) - if err != nil { - return false - } - if n < 0 || n > 255 { - return false - } - if n != 0 && group[0] == '0' { - return false // leading zeroes should be rejected, as they are treated as octals - } - } - return true -} - -// isIPV6 tells whether given string is a valid representation of an IPv6 address -// as defined in RFC 2373, section 2.2. -func isIPV6(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if !strings.Contains(s, ":") { - return false - } - return net.ParseIP(s) != nil -} - -// isURI tells whether given string is valid URI, according to RFC 3986. 
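The `Formats` map at the top of the removed format.go was v5's hook for custom format validators. A minimal sketch of how a consumer registered one; the `rfc1123-label` name is illustrative, not part of the library:

```go
package customformat

import (
	"regexp"

	jsonschema "github.com/santhosh-tekuri/jsonschema/v5"
)

// rfc1123Label mirrors the lowercase DNS-label shape; the format name below is illustrative.
var rfc1123Label = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

func init() {
	// Key is the format name referenced by schemas; the function reports validity.
	jsonschema.Formats["rfc1123-label"] = func(v interface{}) bool {
		s, ok := v.(string)
		if !ok {
			return true // non-strings pass, matching the built-in validators in this file
		}
		return len(s) <= 63 && rfc1123Label.MatchString(s)
	}
}
```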
-func isURI(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - return err == nil && u.IsAbs() -} - -func urlParse(s string) (*url.URL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - - // if hostname is ipv6, validate it - hostname := u.Hostname() - if strings.IndexByte(hostname, ':') != -1 { - if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 { - return nil, errors.New("ipv6 address is not enclosed in brackets") - } - if !isIPV6(hostname) { - return nil, errors.New("invalid ipv6 address") - } - } - return u, nil -} - -// isURIReference tells whether given string is a valid URI Reference -// (either a URI or a relative-reference), according to RFC 3986. -func isURIReference(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := urlParse(s) - return err == nil && !strings.Contains(s, `\`) -} - -// isURITemplate tells whether given string is a valid URI Template -// according to RFC6570. -// -// Current implementation does minimal validation. -func isURITemplate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - if err != nil { - return false - } - for _, item := range strings.Split(u.RawPath, "/") { - depth := 0 - for _, ch := range item { - switch ch { - case '{': - depth++ - if depth != 1 { - return false - } - case '}': - depth-- - if depth != 0 { - return false - } - } - } - if depth != 0 { - return false - } - } - return true -} - -// isRegex tells whether given string is a valid regular expression, -// according to the ECMA 262 regular expression dialect. -// -// The implementation uses go-lang regexp package. -func isRegex(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := regexp.Compile(s) - return err == nil -} - -// isJSONPointer tells whether given string is a valid JSON Pointer. -// -// Note: It returns false for JSON Pointer URI fragments. -func isJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s != "" && !strings.HasPrefix(s, "/") { - return false - } - for _, item := range strings.Split(s, "/") { - for i := 0; i < len(item); i++ { - if item[i] == '~' { - if i == len(item)-1 { - return false - } - switch item[i+1] { - case '0', '1': - // valid - default: - return false - } - } - } - } - return true -} - -// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. -// -// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 -func isRelativeJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s == "" { - return false - } - if s[0] == '0' { - s = s[1:] - } else if s[0] >= '0' && s[0] <= '9' { - for s != "" && s[0] >= '0' && s[0] <= '9' { - s = s[1:] - } - } else { - return false - } - return s == "#" || isJSONPointer(s) -} - -// isUUID tells whether given string is a valid uuid format -// as specified in RFC4122. 
-// -// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details -func isUUID(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - parseHex := func(n int) bool { - for n > 0 { - if len(s) == 0 { - return false - } - hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F') - if !hex { - return false - } - s = s[1:] - n-- - } - return true - } - groups := []int{8, 4, 4, 4, 12} - for i, numDigits := range groups { - if !parseHex(numDigits) { - return false - } - if i == len(groups)-1 { - break - } - if len(s) == 0 || s[0] != '-' { - return false - } - s = s[1:] - } - return len(s) == 0 -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go deleted file mode 100644 index 4198cfe37..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package httploader implements loader.Loader for http/https url. -// -// The package is typically only imported for the side effect of -// registering its Loaders. -// -// To use httploader, link this package into your program: -// -// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -package httploader - -import ( - "fmt" - "io" - "net/http" - - "github.com/santhosh-tekuri/jsonschema/v5" -) - -// Client is the default HTTP Client used to Get the resource. -var Client = http.DefaultClient - -// Load loads resource from given http(s) url. -func Load(url string) (io.ReadCloser, error) { - resp, err := Client.Get(url) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - _ = resp.Body.Close() - return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) - } - return resp.Body, nil -} - -func init() { - jsonschema.Loaders["http"] = Load - jsonschema.Loaders["https"] = Load -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go deleted file mode 100644 index c94195c33..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsonschema - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" -) - -func loadFileURL(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - f := u.Path - if runtime.GOOS == "windows" { - f = strings.TrimPrefix(f, "/") - f = filepath.FromSlash(f) - } - return os.Open(f) -} - -// Loaders is a registry of functions, which know how to load -// absolute url of specific schema. -// -// New loaders can be registered by adding to this map. Key is schema, -// value is function that knows how to load url of that schema -var Loaders = map[string]func(url string) (io.ReadCloser, error){ - "file": loadFileURL, -} - -// LoaderNotFoundError is the error type returned by Load function. -// It tells that no Loader is registered for that URL Scheme. -type LoaderNotFoundError string - -func (e LoaderNotFoundError) Error() string { - return fmt.Sprintf("jsonschema: no Loader found for %s", string(e)) -} - -// LoadURL loads document at given absolute URL. The default implementation -// uses Loaders registry to lookup by schema and uses that loader. -// -// Users can change this variable, if they would like to take complete -// responsibility of loading given URL. Used by Compiler if its LoadURL -// field is nil. 
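The `Loaders` registry and `LoadURL` hook in the removed loader.go were v5's extension points for fetching schemas by URL scheme (v6 replaces them with `Compiler.UseLoader`, added later in this patch). A minimal sketch against the v5 API, with a made-up `memory` scheme:

```go
package memloader

import (
	"bytes"
	"fmt"
	"io"

	jsonschema "github.com/santhosh-tekuri/jsonschema/v5"
)

// docs is an illustrative in-memory store keyed by absolute URL.
var docs = map[string]string{
	"memory:///point.json": `{"type":"object","required":["x","y"]}`,
}

func init() {
	// Register a loader for the made-up "memory" scheme.
	jsonschema.Loaders["memory"] = func(url string) (io.ReadCloser, error) {
		doc, ok := docs[url]
		if !ok {
			return nil, fmt.Errorf("memory loader: %q not found", url)
		}
		return io.NopCloser(bytes.NewReader([]byte(doc))), nil
	}
}
```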
-var LoadURL = func(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - loader, ok := Loaders[u.Scheme] - if !ok { - return nil, LoaderNotFoundError(s) - - } - return loader(s) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go deleted file mode 100644 index d65ae2a92..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go +++ /dev/null @@ -1,77 +0,0 @@ -package jsonschema - -// Flag is output format with simple boolean property valid. -type Flag struct { - Valid bool `json:"valid"` -} - -// FlagOutput returns output in flag format -func (ve *ValidationError) FlagOutput() Flag { - return Flag{} -} - -// Basic --- - -// Basic is output format with flat list of output units. -type Basic struct { - Valid bool `json:"valid"` - Errors []BasicError `json:"errors"` -} - -// BasicError is output unit in basic format. -type BasicError struct { - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error"` -} - -// BasicOutput returns output in basic format -func (ve *ValidationError) BasicOutput() Basic { - var errors []BasicError - var flatten func(*ValidationError) - flatten = func(ve *ValidationError) { - errors = append(errors, BasicError{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: ve.Message, - }) - for _, cause := range ve.Causes { - flatten(cause) - } - } - flatten(ve) - return Basic{Errors: errors} -} - -// Detailed --- - -// Detailed is output format based on structure of schema. -type Detailed struct { - Valid bool `json:"valid"` - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error,omitempty"` - Errors []Detailed `json:"errors,omitempty"` -} - -// DetailedOutput returns output in detailed format -func (ve *ValidationError) DetailedOutput() Detailed { - var errors []Detailed - for _, cause := range ve.Causes { - errors = append(errors, cause.DetailedOutput()) - } - var message = ve.Message - if len(ve.Causes) > 0 { - message = "" - } - return Detailed{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: message, - Errors: errors, - } -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go deleted file mode 100644 index 18349daac..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go +++ /dev/null @@ -1,280 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "net/url" - "path/filepath" - "runtime" - "strconv" - "strings" -) - -type resource struct { - url string // base url of resource. can be empty - floc string // fragment with json-pointer from root resource - doc interface{} - draft *Draft - subresources map[string]*resource // key is floc. 
only applicable for root resource - schema *Schema -} - -func (r *resource) String() string { - return r.url + r.floc -} - -func newResource(url string, r io.Reader) (*resource, error) { - if strings.IndexByte(url, '#') != -1 { - panic(fmt.Sprintf("BUG: newResource(%q)", url)) - } - doc, err := unmarshal(r) - if err != nil { - return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err) - } - url, err = toAbs(url) - if err != nil { - return nil, err - } - return &resource{ - url: url, - floc: "#", - doc: doc, - }, nil -} - -// fillSubschemas fills subschemas in res into r.subresources -func (r *resource) fillSubschemas(c *Compiler, res *resource) error { - if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil { - return err - } - - if r.subresources == nil { - r.subresources = make(map[string]*resource) - } - if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil { - return err - } - - // ensure subresource.url uniqueness - url2floc := make(map[string]string) - for _, sr := range r.subresources { - if sr.url != "" { - if floc, ok := url2floc[sr.url]; ok { - return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url) - } - url2floc[sr.url] = sr.floc - } - } - - return nil -} - -// listResources lists all subresources in res -func (r *resource) listResources(res *resource) []*resource { - var result []*resource - prefix := res.floc + "/" - for _, sr := range r.subresources { - if strings.HasPrefix(sr.floc, prefix) { - result = append(result, sr) - } - } - return result -} - -func (r *resource) findResource(url string) *resource { - if r.url == url { - return r - } - for _, res := range r.subresources { - if res.url == url { - return res - } - } - return nil -} - -// resolve fragment f with sr as base -func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) { - if f == "#" || f == "#/" { - return sr, nil - } - - // resolve by anchor - if !strings.HasPrefix(f, "#/") { - // check in given resource - for _, anchor := range r.draft.anchors(sr.doc) { - if anchor == f[1:] { - return sr, nil - } - } - - // check in subresources that has same base url - prefix := sr.floc + "/" - for _, res := range r.subresources { - if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url { - for _, anchor := range r.draft.anchors(res.doc) { - if anchor == f[1:] { - return res, nil - } - } - } - } - return nil, nil - } - - // resolve by ptr - floc := sr.floc + f[1:] - if res, ok := r.subresources[floc]; ok { - return res, nil - } - - // non-standrad location - doc := r.doc - for _, item := range strings.Split(floc[2:], "/") { - item = strings.Replace(item, "~1", "/", -1) - item = strings.Replace(item, "~0", "~", -1) - item, err := url.PathUnescape(item) - if err != nil { - return nil, err - } - switch d := doc.(type) { - case map[string]interface{}: - if _, ok := d[item]; !ok { - return nil, nil - } - doc = d[item] - case []interface{}: - index, err := strconv.Atoi(item) - if err != nil { - return nil, err - } - if index < 0 || index >= len(d) { - return nil, nil - } - doc = d[index] - default: - return nil, nil - } - } - - id, err := r.draft.resolveID(r.baseURL(floc), doc) - if err != nil { - return nil, err - } - res := &resource{url: id, floc: floc, doc: doc} - r.subresources[floc] = res - if err := r.fillSubschemas(c, res); err != nil { - return nil, err - } - return res, nil -} - -func (r *resource) baseURL(floc string) string { - for { - if sr, ok := 
r.subresources[floc]; ok { - if sr.url != "" { - return sr.url - } - } - slash := strings.LastIndexByte(floc, '/') - if slash == -1 { - break - } - floc = floc[:slash] - } - return r.url -} - -// url helpers --- - -func toAbs(s string) (string, error) { - // if windows absolute file path, convert to file url - // because: net/url parses driver name as scheme - if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` { - s = "file:///" + filepath.ToSlash(s) - } - - u, err := url.Parse(s) - if err != nil { - return "", err - } - if u.IsAbs() { - return s, nil - } - - // s is filepath - if s, err = filepath.Abs(s); err != nil { - return "", err - } - if runtime.GOOS == "windows" { - s = "file:///" + filepath.ToSlash(s) - } else { - s = "file://" + s - } - u, err = url.Parse(s) // to fix spaces in filepath - return u.String(), err -} - -func resolveURL(base, ref string) (string, error) { - if ref == "" { - return base, nil - } - if strings.HasPrefix(ref, "urn:") { - return ref, nil - } - - refURL, err := url.Parse(ref) - if err != nil { - return "", err - } - if refURL.IsAbs() { - return ref, nil - } - - if strings.HasPrefix(base, "urn:") { - base, _ = split(base) - return base + ref, nil - } - - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - return baseURL.ResolveReference(refURL).String(), nil -} - -func split(uri string) (string, string) { - hash := strings.IndexByte(uri, '#') - if hash == -1 { - return uri, "#" - } - f := uri[hash:] - if f == "#/" { - f = "#" - } - return uri[0:hash], f -} - -func (s *Schema) url() string { - u, _ := split(s.Location) - return u -} - -func (s *Schema) loc() string { - _, f := split(s.Location) - return f[1:] -} - -func unmarshal(r io.Reader) (interface{}, error) { - decoder := json.NewDecoder(r) - decoder.UseNumber() - var doc interface{} - if err := decoder.Decode(&doc); err != nil { - return nil, err - } - if t, _ := decoder.Token(); t != nil { - return nil, fmt.Errorf("invalid character %v after top-level value", t) - } - return doc, nil -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go deleted file mode 100644 index 688f0a6fe..000000000 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go +++ /dev/null @@ -1,900 +0,0 @@ -package jsonschema - -import ( - "bytes" - "encoding/json" - "fmt" - "hash/maphash" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -// A Schema represents compiled version of json-schema. -type Schema struct { - Location string // absolute location - - Draft *Draft // draft used by schema. - meta *Schema - vocab []string - dynamicAnchors []*Schema - - // type agnostic validations - Format string - format func(interface{}) bool - Always *bool // always pass/fail. used when booleans are used as schemas in draft-07. - Ref *Schema - RecursiveAnchor bool - RecursiveRef *Schema - DynamicAnchor string - DynamicRef *Schema - dynamicRefAnchor string - Types []string // allowed types. - Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant. - Enum []interface{} // allowed values. - enumError string // error message for enum fail. captured here to avoid constructing error message every time. - Not *Schema - AllOf []*Schema - AnyOf []*Schema - OneOf []*Schema - If *Schema - Then *Schema // nil, when If is nil. - Else *Schema // nil, when If is nil. - - // object validations - MinProperties int // -1 if not specified. 
- MaxProperties int // -1 if not specified. - Required []string // list of required properties. - Properties map[string]*Schema - PropertyNames *Schema - RegexProperties bool // property names must be valid regex. used only in draft4 as workaround in metaschema. - PatternProperties map[*regexp.Regexp]*Schema - AdditionalProperties interface{} // nil or bool or *Schema. - Dependencies map[string]interface{} // map value is *Schema or []string. - DependentRequired map[string][]string - DependentSchemas map[string]*Schema - UnevaluatedProperties *Schema - - // array validations - MinItems int // -1 if not specified. - MaxItems int // -1 if not specified. - UniqueItems bool - Items interface{} // nil or *Schema or []*Schema - AdditionalItems interface{} // nil or bool or *Schema. - PrefixItems []*Schema - Items2020 *Schema // items keyword reintroduced in draft 2020-12 - Contains *Schema - ContainsEval bool // whether any item in an array that passes validation of the contains schema is considered "evaluated" - MinContains int // 1 if not specified - MaxContains int // -1 if not specified - UnevaluatedItems *Schema - - // string validations - MinLength int // -1 if not specified. - MaxLength int // -1 if not specified. - Pattern *regexp.Regexp - ContentEncoding string - decoder func(string) ([]byte, error) - ContentMediaType string - mediaType func([]byte) error - ContentSchema *Schema - - // number validators - Minimum *big.Rat - ExclusiveMinimum *big.Rat - Maximum *big.Rat - ExclusiveMaximum *big.Rat - MultipleOf *big.Rat - - // annotations. captured only when Compiler.ExtractAnnotations is true. - Title string - Description string - Default interface{} - Comment string - ReadOnly bool - WriteOnly bool - Examples []interface{} - Deprecated bool - - // user defined extensions - Extensions map[string]ExtSchema -} - -func (s *Schema) String() string { - return s.Location -} - -func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema { - // fill with default values - s := &Schema{ - Location: url + floc, - Draft: draft, - MinProperties: -1, - MaxProperties: -1, - MinItems: -1, - MaxItems: -1, - MinContains: 1, - MaxContains: -1, - MinLength: -1, - MaxLength: -1, - } - - if doc, ok := doc.(map[string]interface{}); ok { - if ra, ok := doc["$recursiveAnchor"]; ok { - if ra, ok := ra.(bool); ok { - s.RecursiveAnchor = ra - } - } - if da, ok := doc["$dynamicAnchor"]; ok { - if da, ok := da.(string); ok { - s.DynamicAnchor = da - } - } - } - return s -} - -func (s *Schema) hasVocab(name string) bool { - if s == nil { // during bootstrap - return true - } - if name == "core" { - return true - } - for _, url := range s.vocab { - if url == "https://json-schema.org/draft/2019-09/vocab/"+name { - return true - } - if url == "https://json-schema.org/draft/2020-12/vocab/"+name { - return true - } - } - return false -} - -// Validate validates given doc, against the json-schema s. -// -// the v must be the raw json value. for number precision -// unmarshal with json.UseNumber(). -// -// returns *ValidationError if v does not confirm with schema s. -// returns InfiniteLoopError if it detects loop during validation. -// returns InvalidJSONTypeError if it detects any non json value in v. 
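The removed `Validate` doc comment asks callers to decode instances with `json.UseNumber` so numeric precision is preserved. A minimal sketch of that v5 flow; `MustCompileString` is assumed from the rest of the v5 package, outside this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	jsonschema "github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	// MustCompileString is assumed here; any compiled *Schema works the same way.
	sch := jsonschema.MustCompileString("point.json", `{
		"type": "object",
		"required": ["x", "y"],
		"properties": {"x": {"type": "number"}, "y": {"type": "number"}}
	}`)

	dec := json.NewDecoder(strings.NewReader(`{"x": 1, "y": 2.5}`))
	dec.UseNumber() // keep numbers as json.Number for exact comparisons
	var doc interface{}
	if err := dec.Decode(&doc); err != nil {
		panic(err)
	}

	if err := sch.Validate(doc); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println("valid")
}
```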
-func (s *Schema) Validate(v interface{}) (err error) { - return s.validateValue(v, "") -} - -func (s *Schema) validateValue(v interface{}, vloc string) (err error) { - defer func() { - if r := recover(); r != nil { - switch r := r.(type) { - case InfiniteLoopError, InvalidJSONTypeError: - err = r.(error) - default: - panic(r) - } - } - }() - if _, err := s.validate(nil, 0, "", v, vloc); err != nil { - ve := ValidationError{ - KeywordLocation: "", - AbsoluteKeywordLocation: s.Location, - InstanceLocation: vloc, - Message: fmt.Sprintf("doesn't validate with %s", s.Location), - } - return ve.causes(err) - } - return nil -} - -// validate validates given value v with this schema. -func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { - validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { - return &ValidationError{ - KeywordLocation: keywordLocation(scope, keywordPath), - AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), - InstanceLocation: vloc, - Message: fmt.Sprintf(format, a...), - } - } - - sref := schemaRef{spath, s, false} - if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { - panic(err) - } - scope = append(scope, sref) - vscope++ - - // populate result - switch v := v.(type) { - case map[string]interface{}: - result.unevalProps = make(map[string]struct{}) - for pname := range v { - result.unevalProps[pname] = struct{}{} - } - case []interface{}: - result.unevalItems = make(map[int]struct{}) - for i := range v { - result.unevalItems[i] = struct{}{} - } - } - - validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { - vloc := vloc - if vpath != "" { - vloc += "/" + vpath - } - _, err := sch.validate(scope, 0, schPath, v, vloc) - return err - } - - validateInplace := func(sch *Schema, schPath string) error { - vr, err := sch.validate(scope, vscope, schPath, v, vloc) - if err == nil { - // update result - for pname := range result.unevalProps { - if _, ok := vr.unevalProps[pname]; !ok { - delete(result.unevalProps, pname) - } - } - for i := range result.unevalItems { - if _, ok := vr.unevalItems[i]; !ok { - delete(result.unevalItems, i) - } - } - } - return err - } - - if s.Always != nil { - if !*s.Always { - return result, validationError("", "not allowed") - } - return result, nil - } - - if len(s.Types) > 0 { - vType := jsonType(v) - matched := false - for _, t := range s.Types { - if vType == t { - matched = true - break - } else if t == "integer" && vType == "number" { - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - if num.IsInt() { - matched = true - break - } - } - } - if !matched { - return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) - } - } - - var errors []error - - if len(s.Constant) > 0 { - if !equals(v, s.Constant[0]) { - switch jsonType(s.Constant[0]) { - case "object", "array": - errors = append(errors, validationError("const", "const failed")) - default: - errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) - } - } - } - - if len(s.Enum) > 0 { - matched := false - for _, item := range s.Enum { - if equals(v, item) { - matched = true - break - } - } - if !matched { - errors = append(errors, validationError("enum", s.enumError)) - } - } - - if s.format != nil && !s.format(v) { - var val = v - if v, ok := v.(string); ok { - val = quote(v) - } - errors = append(errors, validationError("format", "%v is not valid 
%s", val, quote(s.Format))) - } - - switch v := v.(type) { - case map[string]interface{}: - if s.MinProperties != -1 && len(v) < s.MinProperties { - errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) - } - if s.MaxProperties != -1 && len(v) > s.MaxProperties { - errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) - } - if len(s.Required) > 0 { - var missing []string - for _, pname := range s.Required { - if _, ok := v[pname]; !ok { - missing = append(missing, quote(pname)) - } - } - if len(missing) > 0 { - errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) - } - } - - for pname, sch := range s.Properties { - if pvalue, ok := v[pname]; ok { - delete(result.unevalProps, pname) - if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.PropertyNames != nil { - for pname := range v { - if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.RegexProperties { - for pname := range v { - if !isRegex(pname) { - errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) - } - } - } - for pattern, sch := range s.PatternProperties { - for pname, pvalue := range v { - if pattern.MatchString(pname) { - delete(result.unevalProps, pname) - if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - if s.AdditionalProperties != nil { - if allowed, ok := s.AdditionalProperties.(bool); ok { - if !allowed && len(result.unevalProps) > 0 { - errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) - } - } else { - schema := s.AdditionalProperties.(*Schema) - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - result.unevalProps = nil - } - for dname, dvalue := range s.Dependencies { - if _, ok := v[dname]; ok { - switch dvalue := dvalue.(type) { - case *Schema: - if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { - errors = append(errors, err) - } - case []string: - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - } - for dname, dvalue := range s.DependentRequired { - if _, ok := v[dname]; ok { - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - for dname, sch := range s.DependentSchemas { - if _, ok := v[dname]; ok { - if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { - errors = append(errors, err) - } - } - } - - case []interface{}: - if s.MinItems != -1 && len(v) < s.MinItems { - errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, 
len(v))) - } - if s.MaxItems != -1 && len(v) > s.MaxItems { - errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v))) - } - if s.UniqueItems { - if len(v) <= 20 { - outer1: - for i := 1; i < len(v); i++ { - for j := 0; j < i; j++ { - if equals(v[i], v[j]) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer1 - } - } - } - } else { - m := make(map[uint64][]int) - var h maphash.Hash - outer2: - for i, item := range v { - h.Reset() - hash(item, &h) - k := h.Sum64() - if err != nil { - panic(err) - } - arr, ok := m[k] - if ok { - for _, j := range arr { - if equals(v[j], item) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer2 - } - } - } - arr = append(arr, i) - m[k] = arr - } - } - } - - // items + additionalItems - switch items := s.Items.(type) { - case *Schema: - for i, item := range v { - if err := validate(items, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - case []*Schema: - for i, item := range v { - if i < len(items) { - delete(result.unevalItems, i) - if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if sch, ok := s.AdditionalItems.(*Schema); ok { - delete(result.unevalItems, i) - if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - if additionalItems, ok := s.AdditionalItems.(bool); ok { - if additionalItems { - result.unevalItems = nil - } else if len(v) > len(items) { - errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v))) - } - } - } - - // prefixItems + items - for i, item := range v { - if i < len(s.PrefixItems) { - delete(result.unevalItems, i) - if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if s.Items2020 != nil { - delete(result.unevalItems, i) - if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - - // contains + minContains + maxContains - if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) { - matched := 0 - var causes []error - for i, item := range v { - if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil { - causes = append(causes, err) - } else { - matched++ - if s.ContainsEval { - delete(result.unevalItems, i) - } - } - } - if s.MinContains != -1 && matched < s.MinContains { - errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...)) - } - if s.MaxContains != -1 && matched > s.MaxContains { - errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched)) - } - } - - case string: - // minLength + maxLength - if s.MinLength != -1 || s.MaxLength != -1 { - length := utf8.RuneCount([]byte(v)) - if s.MinLength != -1 && length < s.MinLength { - errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length)) - } - if s.MaxLength != -1 && length > s.MaxLength { - errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, 
length)) - } - } - - if s.Pattern != nil && !s.Pattern.MatchString(v) { - errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) - } - - // contentEncoding + contentMediaType - if s.decoder != nil || s.mediaType != nil { - decoded := s.ContentEncoding == "" - var content []byte - if s.decoder != nil { - b, err := s.decoder(v) - if err != nil { - errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding)) - } else { - content, decoded = b, true - } - } - if decoded && s.mediaType != nil { - if s.decoder == nil { - content = []byte(v) - } - if err := s.mediaType(content); err != nil { - errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) - } - } - if decoded && s.ContentSchema != nil { - contentJSON, err := unmarshal(bytes.NewReader(content)) - if err != nil { - errors = append(errors, validationError("contentSchema", "value is not valid json")) - } else { - err := validate(s.ContentSchema, "contentSchema", contentJSON, "") - if err != nil { - errors = append(errors, err) - } - } - } - } - - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - // lazy convert to *big.Rat to avoid allocation - var numVal *big.Rat - num := func() *big.Rat { - if numVal == nil { - numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) - } - return numVal - } - f64 := func(r *big.Rat) float64 { - f, _ := r.Float64() - return f - } - if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { - errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) - } - if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { - errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) - } - if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { - errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) - } - if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { - errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) - } - if s.MultipleOf != nil { - if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { - errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) - } - } - } - - // $ref + $recursiveRef + $dynamicRef - validateRef := func(sch *Schema, refPath string) error { - if sch != nil { - if err := validateInplace(sch, refPath); err != nil { - var url = sch.Location - if s.url() == sch.url() { - url = sch.loc() - } - return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) - } - } - return nil - } - if err := validateRef(s.Ref, "$ref"); err != nil { - errors = append(errors, err) - } - if s.RecursiveRef != nil { - sch := s.RecursiveRef - if sch.RecursiveAnchor { - // recursiveRef based on scope - for _, e := range scope { - if e.schema.RecursiveAnchor { - sch = e.schema - break - } - } - } - if err := validateRef(sch, "$recursiveRef"); err != nil { - errors = append(errors, err) - } - } - if s.DynamicRef != nil { - sch := s.DynamicRef - if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor { - // dynamicRef based on scope - for i := len(scope) - 1; i >= 0; i-- { - sr := scope[i] - if sr.discard { - break - } - for _, da := range sr.schema.dynamicAnchors { - if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != 
s.DynamicRef { - sch = da - break - } - } - } - } - if err := validateRef(sch, "$dynamicRef"); err != nil { - errors = append(errors, err) - } - } - - if s.Not != nil && validateInplace(s.Not, "not") == nil { - errors = append(errors, validationError("not", "not failed")) - } - - for i, sch := range s.AllOf { - schPath := "allOf/" + strconv.Itoa(i) - if err := validateInplace(sch, schPath); err != nil { - errors = append(errors, validationError(schPath, "allOf failed").add(err)) - } - } - - if len(s.AnyOf) > 0 { - matched := false - var causes []error - for i, sch := range s.AnyOf { - if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { - matched = true - } else { - causes = append(causes, err) - } - } - if !matched { - errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) - } - } - - if len(s.OneOf) > 0 { - matched := -1 - var causes []error - for i, sch := range s.OneOf { - if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { - if matched == -1 { - matched = i - } else { - errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) - break - } - } else { - causes = append(causes, err) - } - } - if matched == -1 { - errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) - } - } - - // if + then + else - if s.If != nil { - err := validateInplace(s.If, "if") - // "if" leaves dynamic scope - scope[len(scope)-1].discard = true - if err == nil { - if s.Then != nil { - if err := validateInplace(s.Then, "then"); err != nil { - errors = append(errors, validationError("then", "if-then failed").add(err)) - } - } - } else { - if s.Else != nil { - if err := validateInplace(s.Else, "else"); err != nil { - errors = append(errors, validationError("else", "if-else failed").add(err)) - } - } - } - // restore dynamic scope - scope[len(scope)-1].discard = false - } - - for _, ext := range s.Extensions { - if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { - errors = append(errors, err) - } - } - - // unevaluatedProperties + unevaluatedItems - switch v := v.(type) { - case map[string]interface{}: - if s.UnevaluatedProperties != nil { - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - result.unevalProps = nil - } - case []interface{}: - if s.UnevaluatedItems != nil { - for i := range result.unevalItems { - if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - } - } - - switch len(errors) { - case 0: - return result, nil - case 1: - return result, errors[0] - default: - return result, validationError("", "").add(errors...) // empty message, used just for wrapping - } -} - -type validationResult struct { - unevalProps map[string]struct{} - unevalItems map[int]struct{} -} - -func (vr validationResult) unevalPnames() string { - pnames := make([]string, 0, len(vr.unevalProps)) - for pname := range vr.unevalProps { - pnames = append(pnames, quote(pname)) - } - return strings.Join(pnames, ", ") -} - -// jsonType returns the json type of given value v. 
-// -// It panics if the given value is not valid json value -func jsonType(v interface{}) string { - switch v.(type) { - case nil: - return "null" - case bool: - return "boolean" - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - return "number" - case string: - return "string" - case []interface{}: - return "array" - case map[string]interface{}: - return "object" - } - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) -} - -// equals tells if given two json values are equal or not. -func equals(v1, v2 interface{}) bool { - v1Type := jsonType(v1) - if v1Type != jsonType(v2) { - return false - } - switch v1Type { - case "array": - arr1, arr2 := v1.([]interface{}), v2.([]interface{}) - if len(arr1) != len(arr2) { - return false - } - for i := range arr1 { - if !equals(arr1[i], arr2[i]) { - return false - } - } - return true - case "object": - obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{}) - if len(obj1) != len(obj2) { - return false - } - for k, v1 := range obj1 { - if v2, ok := obj2[k]; ok { - if !equals(v1, v2) { - return false - } - } else { - return false - } - } - return true - case "number": - num1, _ := new(big.Rat).SetString(fmt.Sprint(v1)) - num2, _ := new(big.Rat).SetString(fmt.Sprint(v2)) - return num1.Cmp(num2) == 0 - default: - return v1 == v2 - } -} - -func hash(v interface{}, h *maphash.Hash) { - switch v := v.(type) { - case nil: - h.WriteByte(0) - case bool: - h.WriteByte(1) - if v { - h.WriteByte(1) - } else { - h.WriteByte(0) - } - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - h.WriteByte(2) - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - h.Write(num.Num().Bytes()) - h.Write(num.Denom().Bytes()) - case string: - h.WriteByte(3) - h.WriteString(v) - case []interface{}: - h.WriteByte(4) - for _, item := range v { - hash(item, h) - } - case map[string]interface{}: - h.WriteByte(5) - props := make([]string, 0, len(v)) - for prop := range v { - props = append(props, prop) - } - sort.Slice(props, func(i, j int) bool { - return props[i] < props[j] - }) - for _, prop := range props { - hash(prop, h) - hash(v[prop], h) - } - default: - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) - } -} - -// escape converts given token to valid json-pointer token -func escape(token string) string { - token = strings.ReplaceAll(token, "~", "~0") - token = strings.ReplaceAll(token, "/", "~1") - return url.PathEscape(token) -} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules similarity index 91% rename from vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules rename to vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules index 314da31c5..d14f5ea70 100644 --- a/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules @@ -1,3 +1,4 @@ [submodule "testdata/JSON-Schema-Test-Suite"] path = testdata/JSON-Schema-Test-Suite url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git + branch = main diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml new file mode 100644 index 000000000..b3cd1749a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml @@ -0,0 +1,5 @@ +linters: + enable: + - nakedret + - errname + - godot diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml new file mode 100644 index 000000000..695b502ed --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml @@ -0,0 +1,7 @@ +- id: jsonschema-validate + name: Validate JSON against JSON Schema + description: ensure json files follow specified JSON Schema + entry: jv + language: golang + additional_dependencies: + - ./cmd/jv diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE similarity index 100% rename from vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE rename to vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md new file mode 100644 index 000000000..0831d7f58 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md @@ -0,0 +1,86 @@ +# jsonschema v6.0.0 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon) + +see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples + +## Library Features + +- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#)) + - [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4) + - [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6) + - [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7) + - [x] [![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09) + - [x] [![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12) +- [x] detect infinite loop traps + - [x] `$schema` cycle + - [x] validation cycle +- [x] custom `$schema` url +- [x] vocabulary based validation +- [x] custom regex engine +- [x] format assertions + - [x] flag to enable in draft >= 2019-09 + - [x] custom format registration + - [x] built-in formats + - [x] regex, uuid + - [x] ipv4, ipv6 + - [x] hostname, email + - [x] date, time, date-time, duration + - [x] json-pointer, relative-json-pointer + - [x] uri, uri-reference, uri-template + - [x] iri, iri-reference + - [x] period, semver +- [x] content assertions + - [x] flag to 
enable in draft >= 7 + - [x] contentEncoding + - [x] base64 + - [x] custom + - [x] contentMediaType + - [x] application/json + - [x] custom + - [x] contentSchema +- [x] errors + - [x] introspectable + - [x] hierarchy + - [x] alternative display with `#` + - [x] output + - [x] flag + - [x] basic + - [x] detailed +- [x] custom vocabulary + - enable via `$vocabulary` for draft >=2019-19 + - enable via flag for draft <= 7 +- [x] mixed dialect support + +## CLI + +to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +``` +Usage: jv [OPTIONS] SCHEMA [INSTANCE...] + +Options: + -c, --assert-content Enable content assertions with draft >= 7 + -f, --assert-format Enable format assertions with draft >= 2019 + --cacert pem-file Use the specified pem-file to verify the peer. The file may contain multiple CA certificates + -d, --draft version Draft version used when '$schema' is missing. Valid values 4, 6, 7, 2019, 2020 (default 2020) + -h, --help Print help information + -k, --insecure Use insecure TLS connection + -o, --output format Output format. Valid values simple, alt, flag, basic, detailed (default "simple") + -q, --quiet Do not print errors + -v, --version Print build information +``` + +- [x] exit code `1` for validation erros, `2` for usage errors +- [x] validate both schema and multiple instances +- [x] support both json and yaml files +- [x] support standard input, use `-` +- [x] quite mode with parsable output +- [x] http(s) url support + - [x] custom certs for validation, use `--cacert` + - [x] flag to skip certificate verification, use `--insecure` + diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go new file mode 100644 index 000000000..4da736103 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go @@ -0,0 +1,332 @@ +package jsonschema + +import ( + "fmt" + "regexp" + "slices" +) + +// Compiler compiles json schema into *Schema. +type Compiler struct { + schemas map[urlPtr]*Schema + roots *roots + formats map[string]*Format + decoders map[string]*Decoder + mediaTypes map[string]*MediaType + assertFormat bool + assertContent bool +} + +// NewCompiler create Compiler Object. +func NewCompiler() *Compiler { + return &Compiler{ + schemas: map[urlPtr]*Schema{}, + roots: newRoots(), + formats: map[string]*Format{}, + decoders: map[string]*Decoder{}, + mediaTypes: map[string]*MediaType{}, + assertFormat: false, + assertContent: false, + } +} + +// DefaultDraft overrides the draft used to +// compile schemas without `$schema` field. +// +// By default, this library uses the latest +// draft supported. +// +// The use of this option is HIGHLY encouraged +// to ensure continued correct operation of your +// schema. The current default value will not stay +// the same overtime. +func (c *Compiler) DefaultDraft(d *Draft) { + c.roots.defaultDraft = d +} + +// AssertFormat always enables format assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required. +// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required. +func (c *Compiler) AssertFormat() { + c.assertFormat = true +} + +// AssertContent enables content assertions. +// +// Content assertions include keywords: +// - contentEncoding +// - contentMediaType +// - contentSchema +// +// Default behavior is always disabled. 
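The v6 `Compiler` added here makes format and content assertions opt-in. A minimal sketch wiring the options declared in this file; `Schema.Validate` is assumed from the rest of the v6 package, outside this hunk, and the resource URL is illustrative:

```go
package main

import (
	"fmt"

	jsonschema "github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	c := jsonschema.NewCompiler()
	c.AssertFormat()  // opt in to "format" checks for 2019-09/2020-12 drafts
	c.AssertContent() // opt in to contentEncoding/contentMediaType/contentSchema

	// AddResource takes an already-unmarshalled document (any), not an io.Reader.
	if err := c.AddResource("schemas/email.json", map[string]any{
		"type":   "string",
		"format": "email",
	}); err != nil {
		panic(err)
	}

	sch, err := c.Compile("schemas/email.json")
	if err != nil {
		panic(err)
	}
	// Schema.Validate is assumed from the v6 package; with AssertFormat enabled
	// this value should be rejected.
	fmt.Println(sch.Validate("not-an-email") != nil)
}
```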
+func (c *Compiler) AssertContent() { + c.assertContent = true +} + +// RegisterFormat registers custom format. +// +// NOTE: +// - "regex" format can not be overridden +// - format assertions are disabled for draft >= 2019-09 +// see [Compiler.AssertFormat] +func (c *Compiler) RegisterFormat(f *Format) { + if f.Name != "regex" { + c.formats[f.Name] = f + } +} + +// RegisterContentEncoding registers custom contentEncoding. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentEncoding(d *Decoder) { + c.decoders[d.Name] = d +} + +// RegisterContentMediaType registers custom contentMediaType. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentMediaType(mt *MediaType) { + c.mediaTypes[mt.Name] = mt +} + +// RegisterVocabulary registers custom vocabulary. +// +// NOTE: +// - vocabularies are disabled for draft >= 2019-09 +// see [Compiler.AssertVocabs] +func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) { + c.roots.vocabularies[vocab.URL] = vocab +} + +// AssertVocabs always enables user-defined vocabularies assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema enables a vocabulary. +// for draft/2020-12: disabled unless metaschema enables a vocabulary. +func (c *Compiler) AssertVocabs() { + c.roots.assertVocabs = true +} + +// AddResource adds schema resource which gets used later in reference +// resolution. +// +// The argument url can be file path or url. Any fragment in url is ignored. +// The argument doc must be valid json value. +func (c *Compiler) AddResource(url string, doc any) error { + uf, err := absolute(url) + if err != nil { + return err + } + if isMeta(string(uf.url)) { + return &ResourceExistsError{string(uf.url)} + } + if !c.roots.loader.add(uf.url, doc) { + return &ResourceExistsError{string(uf.url)} + } + return nil +} + +// UseLoader overrides the default [URLLoader] used +// to load schema resources. +func (c *Compiler) UseLoader(loader URLLoader) { + c.roots.loader.loader = loader +} + +// UseRegexpEngine changes the regexp-engine used. +// By default it uses regexp package from go standard +// library. +// +// NOTE: must be called before compiling any schemas. +func (c *Compiler) UseRegexpEngine(engine RegexpEngine) { + if engine == nil { + engine = goRegexpCompile + } + c.roots.regexpEngine = engine +} + +func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema { + if sch, ok := c.schemas[up]; ok { + // already got compiled + return sch + } + if sch := q.get(up); sch != nil { + return sch + } + sch := newSchema(up) + q.append(sch) + return sch +} + +// MustCompile is like [Compile] but panics if compilation fails. +// It simplifies safe initialization of global variables holding +// compiled schema. +func (c *Compiler) MustCompile(loc string) *Schema { + sch, err := c.Compile(loc) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err)) + } + return sch +} + +// Compile compiles json-schema at given loc. 
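+//
+// A sketch of the typical flow; the resource name and instance value are
+// illustrative, and Validate is the *Schema method defined elsewhere in
+// this package:
+//
+//	c := NewCompiler()
+//	doc, err := UnmarshalJSON(strings.NewReader(`{"type": "object"}`))
+//	if err != nil {
+//		// handle parse error
+//	}
+//	if err := c.AddResource("schema.json", doc); err != nil {
+//		// handle resource error
+//	}
+//	sch, err := c.Compile("schema.json")
+//	if err != nil {
+//		// handle compile error
+//	}
+//	err = sch.Validate(map[string]any{"name": "value"})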
+func (c *Compiler) Compile(loc string) (*Schema, error) { + uf, err := absolute(loc) + if err != nil { + return nil, err + } + up, err := c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.doCompile(up) +} + +func (c *Compiler) doCompile(up urlPtr) (*Schema, error) { + q := &queue{} + compiled := 0 + + c.enqueue(q, up) + for q.len() > compiled { + sch := q.at(compiled) + if err := c.roots.ensureSubschema(sch.up); err != nil { + return nil, err + } + r := c.roots.roots[sch.up.url] + v, err := sch.up.lookup(r.doc) + if err != nil { + return nil, err + } + if err := c.compileValue(v, sch, r, q); err != nil { + return nil, err + } + compiled++ + } + for _, sch := range *q { + c.schemas[sch.up] = sch + } + return c.schemas[up], nil +} + +func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error { + res := r.resource(sch.up.ptr) + sch.DraftVersion = res.dialect.draft.version + + base := urlPtr{sch.up.url, res.ptr} + sch.resource = c.enqueue(q, base) + + // if resource, enqueue dynamic anchors for compilation + if sch.DraftVersion >= 2020 && sch.up == sch.resource.up { + res := r.resource(sch.up.ptr) + for anchor, anchorPtr := range res.anchors { + if slices.Contains(res.dynamicAnchors, anchor) { + up := urlPtr{sch.up.url, anchorPtr} + danchorSch := c.enqueue(q, up) + if sch.dynamicAnchors == nil { + sch.dynamicAnchors = map[string]*Schema{} + } + sch.dynamicAnchors[string(anchor)] = danchorSch + } + } + } + + switch v := v.(type) { + case bool: + sch.Bool = &v + case map[string]any: + if err := c.compileObject(v, sch, r, q); err != nil { + return err + } + } + + sch.allPropsEvaluated = sch.AdditionalProperties != nil + if sch.DraftVersion < 2020 { + sch.allItemsEvaluated = sch.AdditionalItems != nil + switch items := sch.Items.(type) { + case *Schema: + sch.allItemsEvaluated = true + case []*Schema: + sch.numItemsEvaluated = len(items) + } + } else { + sch.allItemsEvaluated = sch.Items2020 != nil + sch.numItemsEvaluated = len(sch.PrefixItems) + } + + return nil +} + +func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error { + if len(obj) == 0 { + b := true + sch.Bool = &b + return nil + } + oc := objCompiler{ + c: c, + obj: obj, + up: sch.up, + r: r, + res: r.resource(sch.up.ptr), + q: q, + } + return oc.compile(sch) +} + +// queue -- + +type queue []*Schema + +func (q *queue) append(sch *Schema) { + *q = append(*q, sch) +} + +func (q *queue) at(i int) *Schema { + return (*q)[i] +} + +func (q *queue) len() int { + return len(*q) +} + +func (q *queue) get(up urlPtr) *Schema { + i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up }) + if i != -1 { + return (*q)[i] + } + return nil +} + +// regexp -- + +// Regexp is the representation of compiled regular expression. +type Regexp interface { + fmt.Stringer + + // MatchString reports whether the string s contains + // any match of the regular expression. + MatchString(string) bool +} + +// RegexpEngine parses a regular expression and returns, +// if successful, a Regexp object that can be used to +// match against text. 
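+//
+// Any function with this signature can be plugged in via
+// [Compiler.UseRegexpEngine]. A sketch using Go's standard regexp package,
+// which is also the built-in default:
+//
+//	c := NewCompiler()
+//	c.UseRegexpEngine(func(s string) (Regexp, error) {
+//		return regexp.Compile(s)
+//	})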
+type RegexpEngine func(string) (Regexp, error) + +func (re RegexpEngine) validate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := re(s) + return err +} + +func goRegexpCompile(s string) (Regexp, error) { + return regexp.Compile(s) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go new file mode 100644 index 000000000..8d62e58b0 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go @@ -0,0 +1,51 @@ +package jsonschema + +import ( + "bytes" + "encoding/base64" + "encoding/json" +) + +// Decoder specifies how to decode specific contentEncoding. +type Decoder struct { + // Name of contentEncoding. + Name string + // Decode given string to byte array. + Decode func(string) ([]byte, error) +} + +var decoders = map[string]*Decoder{ + "base64": { + Name: "base64", + Decode: func(s string) ([]byte, error) { + return base64.StdEncoding.DecodeString(s) + }, + }, +} + +// MediaType specified how to validate bytes against specific contentMediaType. +type MediaType struct { + // Name of contentMediaType. + Name string + + // Validate checks whether bytes conform to this mediatype. + Validate func([]byte) error + + // UnmarshalJSON unmarshals bytes into json value. + // This must be nil if this mediatype is not compatible + // with json. + UnmarshalJSON func([]byte) (any, error) +} + +var mediaTypes = map[string]*MediaType{ + "application/json": { + Name: "application/json", + Validate: func(b []byte) error { + var v any + return json.Unmarshal(b, &v) + }, + UnmarshalJSON: func(b []byte) (any, error) { + return UnmarshalJSON(bytes.NewReader(b)) + }, + }, +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go new file mode 100644 index 000000000..fd09bae8d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go @@ -0,0 +1,360 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +// A Draft represents json-schema specification. +type Draft struct { + version int + url string + sch *Schema + id string // property name used to represent id + subschemas []SchemaPath // locations of subschemas + vocabPrefix string // prefix used for vocabulary + allVocabs map[string]*Schema // names of supported vocabs with its schemas + defaultVocabs []string // names of default vocabs +} + +// String returns the specification url. 
+func (d *Draft) String() string { + return d.url +} + +var ( + Draft4 = &Draft{ + version: 4, + url: "http://json-schema.org/draft-04/schema", + id: "id", + subschemas: []SchemaPath{ + // type agonistic + schemaPath("definitions/*"), + schemaPath("not"), + schemaPath("allOf/[]"), + schemaPath("anyOf/[]"), + schemaPath("oneOf/[]"), + // object + schemaPath("properties/*"), + schemaPath("additionalProperties"), + schemaPath("patternProperties/*"), + // array + schemaPath("items"), + schemaPath("items/[]"), + schemaPath("additionalItems"), + schemaPath("dependencies/*"), + }, + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft6 = &Draft{ + version: 6, + url: "http://json-schema.org/draft-06/schema", + id: "$id", + subschemas: joinSubschemas(Draft4.subschemas, + schemaPath("propertyNames"), + schemaPath("contains"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft7 = &Draft{ + version: 7, + url: "http://json-schema.org/draft-07/schema", + id: "$id", + subschemas: joinSubschemas(Draft6.subschemas, + schemaPath("if"), + schemaPath("then"), + schemaPath("else"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft2019 = &Draft{ + version: 2019, + url: "https://json-schema.org/draft/2019-09/schema", + id: "$id", + subschemas: joinSubschemas(Draft7.subschemas, + schemaPath("$defs/*"), + schemaPath("dependentSchemas/*"), + schemaPath("unevaluatedProperties"), + schemaPath("unevaluatedItems"), + schemaPath("contentSchema"), + ), + vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "validation": nil, + "meta-data": nil, + "format": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "validation"}, + } + + Draft2020 = &Draft{ + version: 2020, + url: "https://json-schema.org/draft/2020-12/schema", + id: "$id", + subschemas: joinSubschemas(Draft2019.subschemas, + schemaPath("prefixItems/[]"), + ), + vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "unevaluated": nil, + "validation": nil, + "meta-data": nil, + "format-annotation": nil, + "format-assertion": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"}, + } + + draftLatest = Draft2020 +) + +func init() { + c := NewCompiler() + c.AssertFormat() + for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} { + d.sch = c.MustCompile(d.url) + for name := range d.allVocabs { + d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name) + } + } +} + +func draftFromURL(url string) *Draft { + u, frag := split(url) + if frag != "" { + return nil + } + u, ok := strings.CutPrefix(u, "http://") + if !ok { + u, _ = strings.CutPrefix(u, "https://") + } + switch u { + case "json-schema.org/schema": + return draftLatest + case "json-schema.org/draft/2020-12/schema": + return Draft2020 + case "json-schema.org/draft/2019-09/schema": + return Draft2019 + case "json-schema.org/draft-07/schema": + return Draft7 + case "json-schema.org/draft-06/schema": + return Draft6 + case "json-schema.org/draft-04/schema": + return Draft4 + default: + return nil + } +} + +func (d *Draft) getID(obj map[string]any) string { + if d.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return "" + } + } + + 
id, ok := strVal(obj, d.id) + if !ok { + return "" + } + id, _ = split(id) // ignore fragment + return id +} + +func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) { + if d.version < 2019 { + return nil, nil + } + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + v, ok := obj["$vocabulary"] + if !ok { + return nil, nil + } + obj, ok = v.(map[string]any) + if !ok { + return nil, nil + } + + var vocabs []string + for vocab, reqd := range obj { + if reqd, ok := reqd.(bool); !ok || !reqd { + continue + } + name, ok := strings.CutPrefix(vocab, d.vocabPrefix) + if ok { + if _, ok := d.allVocabs[name]; ok { + if !slices.Contains(vocabs, name) { + vocabs = append(vocabs, name) + continue + } + } + } + if _, ok := vocabularies[vocab]; !ok { + return nil, &UnsupportedVocabularyError{url.String(), vocab} + } + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + if !slices.Contains(vocabs, "core") { + vocabs = append(vocabs, "core") + } + return vocabs, nil +} + +// -- + +type dialect struct { + draft *Draft + vocabs []string // nil means use draft.defaultVocabs +} + +func (d *dialect) hasVocab(name string) bool { + if name == "core" || d.draft.version < 2019 { + return true + } + if d.vocabs != nil { + return slices.Contains(d.vocabs, name) + } + return slices.Contains(d.draft.defaultVocabs, name) +} + +func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string { + if len(vocabularies) == 0 { + return d.vocabs + } + if d.draft.version < 2019 { + assertVocabs = true + } + if !assertVocabs { + return d.vocabs + } + var vocabs []string + if d.vocabs == nil { + vocabs = slices.Clone(d.draft.defaultVocabs) + } else { + vocabs = slices.Clone(d.vocabs) + } + for vocab := range vocabularies { + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + return vocabs +} + +func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema { + vocabs := d.activeVocabs(assertVocabs, vocabularies) + if vocabs == nil { + return d.draft.sch + } + + var allOf []*Schema + for _, vocab := range vocabs { + sch := d.draft.allVocabs[vocab] + if sch == nil { + if v, ok := vocabularies[vocab]; ok { + sch = v.Schema + } + } + if sch != nil { + allOf = append(allOf, sch) + } + } + if !slices.Contains(vocabs, "core") { + sch := d.draft.allVocabs["core"] + if sch == nil { + sch = d.draft.sch + } + allOf = append(allOf, sch) + } + sch := &Schema{ + Location: "urn:mem:metaschema", + up: urlPtr{url("urn:mem:metaschema"), ""}, + DraftVersion: d.draft.version, + AllOf: allOf, + } + sch.resource = sch + if sch.DraftVersion >= 2020 { + sch.DynamicAnchor = "meta" + sch.dynamicAnchors = map[string]*Schema{ + "meta": sch, + } + } + return sch +} + +// -- + +type ParseIDError struct { + URL string +} + +func (e *ParseIDError) Error() string { + return fmt.Sprintf("error in parsing id at %q", e.URL) +} + +// -- + +type ParseAnchorError struct { + URL string +} + +func (e *ParseAnchorError) Error() string { + return fmt.Sprintf("error in parsing anchor at %q", e.URL) +} + +// -- + +type DuplicateIDError struct { + ID string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateIDError) Error() string { + return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +type DuplicateAnchorError struct { + Anchor string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateAnchorError) Error() string { + return 
fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath { + var a []SchemaPath + a = append(a, a1...) + a = append(a, a2...) + return a +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go new file mode 100644 index 000000000..b78b22e2a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go @@ -0,0 +1,708 @@ +package jsonschema + +import ( + "net/netip" + gourl "net/url" + "strconv" + "strings" + "time" +) + +// Format defined specific format. +type Format struct { + // Name of format. + Name string + + // Validate checks if given value is of this format. + Validate func(v any) error +} + +var formats = map[string]*Format{ + "json-pointer": {"json-pointer", validateJSONPointer}, + "relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer}, + "uuid": {"uuid", validateUUID}, + "duration": {"duration", validateDuration}, + "period": {"period", validatePeriod}, + "ipv4": {"ipv4", validateIPV4}, + "ipv6": {"ipv6", validateIPV6}, + "hostname": {"hostname", validateHostname}, + "email": {"email", validateEmail}, + "date": {"date", validateDate}, + "time": {"time", validateTime}, + "date-time": {"date-time", validateDateTime}, + "uri": {"uri", validateURI}, + "iri": {"iri", validateURI}, + "uri-reference": {"uri-reference", validateURIReference}, + "iri-reference": {"iri-reference", validateURIReference}, + "uri-template": {"uri-template", validateURITemplate}, + "semver": {"semver", validateSemver}, +} + +// see https://www.rfc-editor.org/rfc/rfc6901#section-3 +func validateJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if s == "" { + return nil + } + if !strings.HasPrefix(s, "/") { + return LocalizableError("not starting with /") + } + for _, tok := range strings.Split(s, "/")[1:] { + escape := false + for _, ch := range tok { + if escape { + escape = false + if ch != '0' && ch != '1' { + return LocalizableError("~ must be followed by 0 or 1") + } + continue + } + if ch == '~' { + escape = true + continue + } + switch { + case ch >= '\x00' && ch <= '\x2E': + case ch >= '\x30' && ch <= '\x7D': + case ch >= '\x7F' && ch <= '\U0010FFFF': + default: + return LocalizableError("invalid character %q", ch) + } + } + if escape { + return LocalizableError("~ must be followed by 0 or 1") + } + } + return nil +} + +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func validateRelativeJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // start with non-negative-integer + numDigits := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("must start with non-negative integer") + } + if numDigits > 1 && strings.HasPrefix(s, "0") { + return LocalizableError("starts with zero") + } + s = s[numDigits:] + + // followed by either json-pointer or '#' + if s == "#" { + return nil + } + return validateJSONPointer(s) +} + +// see https://datatracker.ietf.org/doc/html/rfc4122#page-4 +func validateUUID(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + hexGroups := []int{8, 4, 4, 4, 12} + groups := strings.Split(s, "-") + if len(groups) != len(hexGroups) { + return LocalizableError("must have %d elements", len(hexGroups)) + } + for i, group := range groups { + if len(group) != 
hexGroups[i] { + return LocalizableError("element %d must be %d characters long", i+1, hexGroups[i]) + } + for _, ch := range group { + switch { + case ch >= '0' && ch <= '9': + case ch >= 'a' && ch <= 'f': + case ch >= 'A' && ch <= 'F': + default: + return LocalizableError("non-hex character %q", ch) + } + } + } + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A +func validateDuration(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // must start with 'P' + s, ok = strings.CutPrefix(s, "P") + if !ok { + return LocalizableError("must start with P") + } + if s == "" { + return LocalizableError("nothing after P") + } + + // dur-week + if s, ok := strings.CutSuffix(s, "W"); ok { + if s == "" { + return LocalizableError("no number in week") + } + for _, ch := range s { + if ch < '0' || ch > '9' { + return LocalizableError("invalid week") + } + } + return nil + } + + allUnits := []string{"YMD", "HMS"} + for i, s := range strings.Split(s, "T") { + if i != 0 && s == "" { + return LocalizableError("no time elements") + } + if i >= len(allUnits) { + return LocalizableError("more than one T") + } + units := allUnits[i] + for s != "" { + digitCount := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + digitCount++ + } else { + break + } + } + if digitCount == 0 { + return LocalizableError("missing number") + } + s = s[digitCount:] + if s == "" { + return LocalizableError("missing unit") + } + unit := s[0] + j := strings.IndexByte(units, unit) + if j == -1 { + if strings.IndexByte(allUnits[i], unit) != -1 { + return LocalizableError("unit %q out of order", unit) + } + return LocalizableError("invalid unit %q", unit) + } + units = units[j+1:] + s = s[1:] + } + } + + return nil +} + +func validateIPV4(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return LocalizableError("expected four decimals") + } + for _, group := range groups { + if len(group) > 1 && group[0] == '0' { + return LocalizableError("leading zeros") + } + n, err := strconv.Atoi(group) + if err != nil { + return err + } + if n < 0 || n > 255 { + return LocalizableError("decimal must be between 0 and 255") + } + } + return nil +} + +func validateIPV6(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if !strings.Contains(s, ":") { + return LocalizableError("missing colon") + } + addr, err := netip.ParseAddr(s) + if err != nil { + return err + } + if addr.Zone() != "" { + return LocalizableError("zone id is not a part of ipv6 address") + } + return nil +} + +// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names +func validateHostname(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return LocalizableError("more than 253 characters long") + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if len(label) < 1 || len(label) > 63 { + return LocalizableError("label must be 1 to 63 characters long") + } + + // labels must not start or end with a hyphen + if strings.HasPrefix(label, "-") { + return LocalizableError("label starts with hyphen") + } + if strings.HasSuffix(label, "-") { + return LocalizableError("label ends with hyphen") + } + 
+ // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, ch := range label { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case ch == '-': + default: + return LocalizableError("invalid character %q", ch) + } + } + } + return nil +} + +// see https://en.wikipedia.org/wiki/Email_address +func validateEmail(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return LocalizableError("more than 255 characters long") + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return LocalizableError("missing @") + } + local, domain := s[:at], s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return LocalizableError("local part more than 64 characters long") + } + + if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasPrefix(local, `"`) { + // quoted + local := local[1 : len(local)-1] + if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 { + return LocalizableError("backslash and quote are not allowed within quoted local part") + } + } else { + // unquoted + if strings.HasPrefix(local, ".") { + return LocalizableError("starts with dot") + } + if strings.HasSuffix(local, ".") { + return LocalizableError("ends with dot") + } + + // consecutive dots not allowed + if strings.Contains(local, "..") { + return LocalizableError("consecutive dots") + } + + // check allowed chars + for _, ch := range local { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch): + default: + return LocalizableError("invalid character %q", ch) + } + } + } + + // domain if enclosed in brackets, must match an IP address + if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") { + domain = domain[1 : len(domain)-1] + if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok { + if err := validateIPV6(rem); err != nil { + return LocalizableError("invalid ipv6 address: %v", err) + } + return nil + } + if err := validateIPV4(domain); err != nil { + return LocalizableError("invalid ipv4 address: %v", err) + } + return nil + } + + // domain must match the requirements for a hostname + if err := validateHostname(domain); err != nil { + return LocalizableError("invalid domain: %v", err) + } + + return nil +} + +// see see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := time.Parse("2006-01-02", s) + return err +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +// NOTE: golang time package does not support leap seconds. 
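+// Leap seconds are therefore checked by hand below: a seconds value of 60 is
+// accepted only when the time, after applying any numeric offset, is 23:59.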
+func validateTime(v any) error { + str, ok := v.(string) + if !ok { + return nil + } + + // min: hh:mm:ssZ + if len(str) < 9 { + return LocalizableError("less than 9 characters long") + } + if str[2] != ':' || str[5] != ':' { + return LocalizableError("missing colon in correct place") + } + + // parse hh:mm:ss + var hms []int + for _, tok := range strings.SplitN(str[:8], ":", 3) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min/sec") + } + if i < 0 { + return LocalizableError("non-positive hour/min/sec") + } + hms = append(hms, i) + } + if len(hms) != 3 { + return LocalizableError("missing hour/min/sec") + } + h, m, s := hms[0], hms[1], hms[2] + if h > 23 || m > 59 || s > 60 { + return LocalizableError("hour/min/sec out of range") + } + str = str[8:] + + // parse sec-frac if present + if rem, ok := strings.CutPrefix(str, "."); ok { + numDigits := 0 + for _, ch := range rem { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("no digits in second fraction") + } + str = rem[numDigits:] + } + + if str != "z" && str != "Z" { + // parse time-numoffset + if len(str) != 6 { + return LocalizableError("offset must be 6 characters long") + } + var sign int + switch str[0] { + case '+': + sign = -1 + case '-': + sign = +1 + default: + return LocalizableError("offset must begin with plus/minus") + } + str = str[1:] + if str[2] != ':' { + return LocalizableError("missing colon in offset in correct place") + } + + var zhm []int + for _, tok := range strings.SplitN(str, ":", 2) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min in offset") + } + if i < 0 { + return LocalizableError("non-positive hour/min in offset") + } + zhm = append(zhm, i) + } + zh, zm := zhm[0], zhm[1] + if zh > 23 || zm > 59 { + return LocalizableError("hour/min in offset out of range") + } + + // apply timezone + hm := (h*60 + m) + sign*(zh*60+zm) + if hm < 0 { + hm += 24 * 60 + } + h, m = hm/60, hm%60 + } + + // check leap second + if s >= 60 && (h != 23 || m != 59) { + return LocalizableError("invalid leap second") + } + + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDateTime(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // min: yyyy-mm-ddThh:mm:ssZ + if len(s) < 20 { + return LocalizableError("less than 20 characters long") + } + + if s[10] != 't' && s[10] != 'T' { + return LocalizableError("11th character must be t or T") + } + if err := validateDate(s[:10]); err != nil { + return LocalizableError("invalid date element: %v", err) + } + if err := validateTime(s[11:]); err != nil { + return LocalizableError("invalid time element: %v", err) + } + return nil +} + +func parseURL(s string) (*gourl.URL, error) { + u, err := gourl.Parse(s) + if err != nil { + return nil, err + } + + // gourl does not validate ipv6 host address + hostName := u.Hostname() + if strings.Contains(hostName, ":") { + if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") { + return nil, LocalizableError("ipv6 address not enclosed in brackets") + } + if err := validateIPV6(hostName); err != nil { + return nil, LocalizableError("invalid ipv6 address: %v", err) + } + } + + return u, nil +} + +func validateURI(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + if !u.IsAbs() { + return LocalizableError("relative url") + } + return nil +} + +func 
validateURIReference(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if strings.Contains(s, `\`) { + return LocalizableError(`contains \`) + } + _, err := parseURL(s) + return err +} + +func validateURITemplate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + for _, tok := range strings.Split(u.RawPath, "/") { + tok, err = decode(tok) + if err != nil { + return LocalizableError("percent decode failed: %v", err) + } + want := true + for _, ch := range tok { + var got bool + switch ch { + case '{': + got = true + case '}': + got = false + default: + continue + } + if got != want { + return LocalizableError("nested curly braces") + } + want = !want + } + if !want { + return LocalizableError("no matching closing brace") + } + } + return nil +} + +func validatePeriod(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + slash := strings.IndexByte(s, '/') + if slash == -1 { + return LocalizableError("missing slash") + } + + start, end := s[:slash], s[slash+1:] + if strings.HasPrefix(start, "P") { + if err := validateDuration(start); err != nil { + return LocalizableError("invalid start duration: %v", err) + } + if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } else { + if err := validateDateTime(start); err != nil { + return LocalizableError("invalid start date-time: %v", err) + } + if strings.HasPrefix(end, "P") { + if err := validateDuration(end); err != nil { + return LocalizableError("invalid end duration: %v", err) + } + } else if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } + + return nil +} + +// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions +func validateSemver(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // build -- + if i := strings.IndexByte(s, '+'); i != -1 { + build := s[i+1:] + if build == "" { + return LocalizableError("build is empty") + } + for _, buildID := range strings.Split(build, ".") { + if buildID == "" { + return LocalizableError("build identifier is empty") + } + for _, ch := range buildID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + default: + return LocalizableError("invalid character %q in build identifier", ch) + } + } + } + s = s[:i] + } + + // pre-release -- + if i := strings.IndexByte(s, '-'); i != -1 { + preRelease := s[i+1:] + for _, preReleaseID := range strings.Split(preRelease, ".") { + if preReleaseID == "" { + return LocalizableError("pre-release identifier is empty") + } + allDigits := true + for _, ch := range preReleaseID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + allDigits = false + default: + return LocalizableError("invalid character %q in pre-release identifier", ch) + } + } + if allDigits && len(preReleaseID) > 1 && preReleaseID[0] == '0' { + return LocalizableError("pre-release numeric identifier starts with zero") + } + } + s = s[:i] + } + + // versionCore -- + versions := strings.Split(s, ".") + if len(versions) != 3 { + return LocalizableError("versionCore must have 3 numbers separated by dot") + } + names := []string{"major", "minor", "patch"} + for i, version := range versions { + if version == "" { + return LocalizableError("%s is empty", names[i]) + } + if len(version) > 1 && version[0] == '0' { + return 
LocalizableError("%s starts with zero", names[i]) + } + for _, ch := range version { + if ch < '0' || ch > '9' { + return LocalizableError("%s contains non-digit", names[i]) + } + } + } + + return nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work new file mode 100644 index 000000000..13df855d5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work @@ -0,0 +1,8 @@ +go 1.21.1 + +use ( + . + ./cmd/jv +) + +replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.0 => ./ diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go new file mode 100644 index 000000000..7da112ac8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go @@ -0,0 +1,651 @@ +package kind + +import ( + "fmt" + "math/big" + "strings" + + "golang.org/x/text/message" +) + +// -- + +type InvalidJsonValue struct { + Value any +} + +func (*InvalidJsonValue) KeywordPath() []string { + return nil +} + +func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid jsonType %T", k.Value) +} + +// -- + +type Schema struct { + Location string +} + +func (*Schema) KeywordPath() []string { + return nil +} + +func (k *Schema) LocalizedString(p *message.Printer) string { + return p.Sprintf("jsonschema validation failed with %s", quote(k.Location)) +} + +// -- + +type Group struct{} + +func (*Group) KeywordPath() []string { + return nil +} + +func (*Group) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type Not struct{} + +func (*Not) KeywordPath() []string { + return nil +} + +func (*Not) LocalizedString(p *message.Printer) string { + return p.Sprintf("not failed") +} + +// -- + +type AllOf struct{} + +func (*AllOf) KeywordPath() []string { + return []string{"allOf"} +} + +func (*AllOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("allOf failed") +} + +// -- + +type AnyOf struct{} + +func (*AnyOf) KeywordPath() []string { + return []string{"anyOf"} +} + +func (*AnyOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("anyOf failed") +} + +// -- + +type OneOf struct { + // Subschemas gives indexes of Subschemas that have matched. + // Value nil, means none of the subschemas matched. 
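+	// When OneOf is reported with matches, at least two subschemas matched
+	// (a single match would satisfy oneOf), so indexes 0 and 1 are always
+	// present in a non-nil slice.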
+ Subschemas []int +} + +func (*OneOf) KeywordPath() []string { + return []string{"oneOf"} +} + +func (k *OneOf) LocalizedString(p *message.Printer) string { + if len(k.Subschemas) == 0 { + return p.Sprintf("oneOf failed, none matched") + } + return p.Sprintf("oneOf failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1]) +} + +//-- + +type FalseSchema struct{} + +func (*FalseSchema) KeywordPath() []string { + return nil +} + +func (*FalseSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("false schema") +} + +// -- + +type RefCycle struct { + URL string + KeywordLocation1 string + KeywordLocation2 string +} + +func (*RefCycle) KeywordPath() []string { + return nil +} + +func (k *RefCycle) LocalizedString(p *message.Printer) string { + return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL) +} + +// -- + +type Type struct { + Got string + Want []string +} + +func (*Type) KeywordPath() []string { + return []string{"type"} +} + +func (k *Type) LocalizedString(p *message.Printer) string { + want := strings.Join(k.Want, " or ") + return p.Sprintf("got %s, want %s", k.Got, want) +} + +// -- + +type Enum struct { + Got any + Want []any +} + +// KeywordPath implements jsonschema.ErrorKind. +func (*Enum) KeywordPath() []string { + return []string{"enum"} +} + +func (k *Enum) LocalizedString(p *message.Printer) string { + allPrimitive := true +loop: + for _, item := range k.Want { + switch item.(type) { + case []any, map[string]any: + allPrimitive = false + break loop + } + } + if allPrimitive { + if len(k.Want) == 1 { + return p.Sprintf("value must be %s", display(k.Want[0])) + } + var want []string + for _, v := range k.Want { + want = append(want, display(v)) + } + return p.Sprintf("value must be one of %s", strings.Join(want, ", ")) + } + return p.Sprintf("enum failed") +} + +// -- + +type Const struct { + Got any + Want any +} + +func (*Const) KeywordPath() []string { + return []string{"const"} +} + +func (k *Const) LocalizedString(p *message.Printer) string { + switch want := k.Want.(type) { + case []any, map[string]any: + return p.Sprintf("const failed") + default: + return p.Sprintf("value must be %s", display(want)) + } +} + +// -- + +type Format struct { + Got any + Want string + Err error +} + +func (*Format) KeywordPath() []string { + return []string{"format"} +} + +func (k *Format) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p)) +} + +// -- + +type Reference struct { + Keyword string + URL string +} + +func (k *Reference) KeywordPath() []string { + return []string{k.Keyword} +} + +func (*Reference) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type MinProperties struct { + Got, Want int +} + +func (*MinProperties) KeywordPath() []string { + return []string{"minProperties"} +} + +func (k *MinProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxProperties struct { + Got, Want int +} + +func (*MaxProperties) KeywordPath() []string { + return []string{"maxProperties"} +} + +func (k *MaxProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MinItems struct { + Got, Want int +} + +func (*MinItems) KeywordPath() []string { + return []string{"minItems"} +} + +func (k 
*MinItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxItems struct { + Got, Want int +} + +func (*MaxItems) KeywordPath() []string { + return []string{"maxItems"} +} + +func (k *MaxItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type AdditionalItems struct { + Count int +} + +func (*AdditionalItems) KeywordPath() []string { + return []string{"additionalItems"} +} + +func (k *AdditionalItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("last %d additionalItem(s) not allowed", k.Count) +} + +// -- + +type Required struct { + Missing []string +} + +func (*Required) KeywordPath() []string { + return []string{"required"} +} + +func (k *Required) LocalizedString(p *message.Printer) string { + if len(k.Missing) == 1 { + return p.Sprintf("missing property %s", quote(k.Missing[0])) + } + return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", ")) +} + +// -- + +type Dependency struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *Dependency) KeywordPath() []string { + return []string{"dependency", k.Prop} +} + +func (k *Dependency) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type DependentRequired struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *DependentRequired) KeywordPath() []string { + return []string{"dependentRequired", k.Prop} +} + +func (k *DependentRequired) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type AdditionalProperties struct { + Properties []string +} + +func (*AdditionalProperties) KeywordPath() []string { + return []string{"additionalProperties"} +} + +func (k *AdditionalProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", ")) +} + +// -- + +type PropertyNames struct { + Property string +} + +func (*PropertyNames) KeywordPath() []string { + return []string{"propertyNames"} +} + +func (k *PropertyNames) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid propertyName %s", quote(k.Property)) +} + +// -- + +type UniqueItems struct { + Duplicates [2]int +} + +func (*UniqueItems) KeywordPath() []string { + return []string{"uniqueItems"} +} + +func (k *UniqueItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1]) +} + +// -- + +type Contains struct{} + +func (*Contains) KeywordPath() []string { + return []string{"contains"} +} + +func (*Contains) LocalizedString(p *message.Printer) string { + return p.Sprintf("no items match contains schema") +} + +// -- + +type MinContains struct { + Got []int + Want int +} + +func (*MinContains) KeywordPath() []string { + return []string{"minContains"} +} + +func (k *MinContains) LocalizedString(p *message.Printer) string { + if len(k.Got) == 0 { + return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want) + } else { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1]) 
+ } +} + +// -- + +type MaxContains struct { + Got []int + Want int +} + +func (*MaxContains) KeywordPath() []string { + return []string{"maxContains"} +} + +func (k *MaxContains) LocalizedString(p *message.Printer) string { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1]) +} + +// -- + +type MinLength struct { + Got, Want int +} + +func (*MinLength) KeywordPath() []string { + return []string{"minLength"} +} + +func (k *MinLength) LocalizedString(p *message.Printer) string { + return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxLength struct { + Got, Want int +} + +func (*MaxLength) KeywordPath() []string { + return []string{"maxLength"} +} + +func (k *MaxLength) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want) +} + +// -- + +type Pattern struct { + Got string + Want string +} + +func (*Pattern) KeywordPath() []string { + return []string{"pattern"} +} + +func (k *Pattern) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want)) +} + +// -- + +type ContentEncoding struct { + Want string + Err error +} + +func (*ContentEncoding) KeywordPath() []string { + return []string{"contentEncoding"} +} + +func (k *ContentEncoding) LocalizedString(p *message.Printer) string { + return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p)) +} + +// -- + +type ContentMediaType struct { + Got []byte + Want string + Err error +} + +func (*ContentMediaType) KeywordPath() []string { + return []string{"contentMediaType"} +} + +func (k *ContentMediaType) LocalizedString(p *message.Printer) string { + return p.Sprintf("value if not of mediatype %s: %v", quote(k.Want), k.Err) +} + +// -- + +type ContentSchema struct{} + +func (*ContentSchema) KeywordPath() []string { + return []string{"contentSchema"} +} + +func (*ContentSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("contentSchema failed") +} + +// -- + +type Minimum struct { + Got *big.Rat + Want *big.Rat +} + +func (*Minimum) KeywordPath() []string { + return []string{"minimum"} +} + +func (k *Minimum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("minimum: got %v, want %v", got, want) +} + +// -- + +type Maximum struct { + Got *big.Rat + Want *big.Rat +} + +func (*Maximum) KeywordPath() []string { + return []string{"maximum"} +} + +func (k *Maximum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("maximum: got %v, want %v", got, want) +} + +// -- + +type ExclusiveMinimum struct { + Got *big.Rat + Want *big.Rat +} + +func (*ExclusiveMinimum) KeywordPath() []string { + return []string{"exclusiveMinimum"} +} + +func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want) +} + +// -- + +type ExclusiveMaximum struct { + Got *big.Rat + Want *big.Rat +} + +func (*ExclusiveMaximum) KeywordPath() []string { + return []string{"exclusiveMaximum"} +} + +func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("exclusiveMaximum: got %v, want %v", got, want) +} + +// -- + 
+type MultipleOf struct { + Got *big.Rat + Want *big.Rat +} + +func (*MultipleOf) KeywordPath() []string { + return []string{"multipleOf"} +} + +func (k *MultipleOf) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("multipleOf: got %v, want %v", got, want) +} + +// -- + +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func joinQuoted(arr []string, sep string) string { + var sb strings.Builder + for _, s := range arr { + if sb.Len() > 0 { + sb.WriteString(sep) + } + sb.WriteString(quote(s)) + } + return sb.String() +} + +// to be used only for primitive. +func display(v any) string { + switch v := v.(type) { + case string: + return quote(v) + case []any, map[string]any: + return "value" + default: + return fmt.Sprintf("%v", v) + } +} + +func localizedError(err error, p *message.Printer) string { + if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok { + return err.LocalizedError(p) + } + return err.Error() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go new file mode 100644 index 000000000..ce0170e20 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go @@ -0,0 +1,266 @@ +package jsonschema + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + gourl "net/url" + "os" + "path/filepath" + "runtime" + "strings" +) + +// URLLoader knows how to load json from given url. +type URLLoader interface { + // Load loads json from given absolute url. + Load(url string) (any, error) +} + +// -- + +// FileLoader loads json file url. +type FileLoader struct{} + +func (l FileLoader) Load(url string) (any, error) { + path, err := l.ToFile(url) + if err != nil { + return nil, err + } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return UnmarshalJSON(f) +} + +// ToFile is helper method to convert file url to file path. +func (l FileLoader) ToFile(url string) (string, error) { + u, err := gourl.Parse(url) + if err != nil { + return "", err + } + if u.Scheme != "file" { + return "", fmt.Errorf("invalid file url: %s", u) + } + path := u.Path + if runtime.GOOS == "windows" { + path = strings.TrimPrefix(path, "/") + path = filepath.FromSlash(path) + } + return path, nil +} + +// -- + +// SchemeURLLoader delegates to other [URLLoaders] +// based on url scheme. 
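+//
+// A sketch of wiring scheme-specific loaders into a compiler; myHTTPLoader
+// stands in for a caller-provided [URLLoader], since only [FileLoader] is
+// bundled in this file:
+//
+//	httpLoader := myHTTPLoader{}
+//	c := NewCompiler()
+//	c.UseLoader(SchemeURLLoader{
+//		"file":  FileLoader{},
+//		"http":  httpLoader,
+//		"https": httpLoader,
+//	})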
+type SchemeURLLoader map[string]URLLoader + +func (l SchemeURLLoader) Load(url string) (any, error) { + u, err := gourl.Parse(url) + if err != nil { + return nil, err + } + ll, ok := l[u.Scheme] + if !ok { + return nil, &UnsupportedURLSchemeError{u.String()} + } + return ll.Load(url) +} + +// -- + +//go:embed metaschemas +var metaFS embed.FS + +func openMeta(url string) (fs.File, error) { + u, meta := strings.CutPrefix(url, "http://json-schema.org/") + if !meta { + u, meta = strings.CutPrefix(url, "https://json-schema.org/") + } + if meta { + if u == "schema" { + return openMeta(draftLatest.url) + } + f, err := metaFS.Open("metaschemas/" + u) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + return f, err + } + return nil, nil + +} + +func isMeta(url string) bool { + f, err := openMeta(url) + if err != nil { + return true + } + if f != nil { + f.Close() + return true + } + return false +} + +func loadMeta(url string) (any, error) { + f, err := openMeta(url) + if err != nil { + return nil, err + } + if f == nil { + return nil, nil + } + defer f.Close() + return UnmarshalJSON(f) +} + +// -- + +type defaultLoader struct { + docs map[url]any // docs loaded so far + loader URLLoader +} + +func (l *defaultLoader) add(url url, doc any) bool { + if _, ok := l.docs[url]; ok { + return false + } + l.docs[url] = doc + return true +} + +func (l *defaultLoader) load(url url) (any, error) { + if doc, ok := l.docs[url]; ok { + return doc, nil + } + doc, err := loadMeta(url.String()) + if err != nil { + return nil, err + } + if doc != nil { + l.add(url, doc) + return doc, nil + } + if l.loader == nil { + return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")} + } + doc, err = l.loader.Load(url.String()) + if err != nil { + return nil, &LoadURLError{URL: url.String(), Err: err} + } + l.add(url, doc) + return doc, nil +} + +func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle map[url]struct{}) (*Draft, error) { + obj, ok := doc.(map[string]any) + if !ok { + return defaultDraft, nil + } + sch, ok := strVal(obj, "$schema") + if !ok { + return defaultDraft, nil + } + if draft := draftFromURL(sch); draft != nil { + return draft, nil + } + sch, _ = split(sch) + if _, err := gourl.Parse(sch); err != nil { + return nil, &InvalidMetaSchemaURLError{up.String(), err} + } + schUrl := url(sch) + if up.ptr.isEmpty() && schUrl == up.url { + return nil, &UnsupportedDraftError{schUrl.String()} + } + if _, ok := cycle[schUrl]; ok { + return nil, &MetaSchemaCycleError{schUrl.String()} + } + cycle[schUrl] = struct{}{} + doc, err := l.load(schUrl) + if err != nil { + return nil, err + } + return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle) +} + +func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) { + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + sch, ok := strVal(obj, "$schema") + if !ok { + return nil, nil + } + if draft := draftFromURL(sch); draft != nil { + return nil, nil + } + sch, _ = split(sch) + if _, err := gourl.Parse(sch); err != nil { + return nil, &ParseURLError{sch, err} + } + schUrl := url(sch) + doc, err := l.load(schUrl) + if err != nil { + return nil, err + } + return draft.getVocabs(schUrl, doc, vocabularies) +} + +// -- + +type LoadURLError struct { + URL string + Err error +} + +func (e *LoadURLError) Error() string { + return fmt.Sprintf("failing loading %q: %v", e.URL, e.Err) +} + +// -- + +type 
UnsupportedURLSchemeError struct { + url string +} + +func (e *UnsupportedURLSchemeError) Error() string { + return fmt.Sprintf("no URLLoader registered for %q", e.url) +} + +// -- + +type ResourceExistsError struct { + url string +} + +func (e *ResourceExistsError) Error() string { + return fmt.Sprintf("resource for %q already exists", e.url) +} + +// -- + +// UnmarshalJSON unmarshals into [any] without losing +// number precision using [json.Number]. +func UnmarshalJSON(r io.Reader) (any, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc any + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if _, err := decoder.Token(); err == nil || err != io.EOF { + return nil, fmt.Errorf("invalid character after top-level value") + } + return doc, nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema new file mode 100644 index 000000000..b2a7ff0f5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema @@ -0,0 +1,151 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + 
"additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema new file mode 100644 index 000000000..fa22ad1b0 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema @@ -0,0 +1,150 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + 
{ "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema new file mode 100644 index 000000000..326759a62 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema @@ -0,0 +1,172 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + 
"propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator new file mode 100644 index 000000000..857d2d495 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator @@ -0,0 +1,55 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content new file mode 100644 index 000000000..fa5d20b8d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + 
"contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core new file mode 100644 index 000000000..bf5731985 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format new file mode 100644 index 000000000..fe553c239 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data new file mode 100644 index 000000000..5c95715c4 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation new file mode 100644 index 000000000..f3525e076 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema new file mode 100644 index 000000000..f433389be --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + 
"$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator new file mode 100644 index 000000000..0ef24edc8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator @@ -0,0 +1,47 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content new file mode 100644 index 000000000..0330ff0a8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core new file mode 100644 index 000000000..c4de7005a --- /dev/null +++ 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core @@ -0,0 +1,50 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation new file mode 100644 index 000000000..0aa07d1c1 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion new file mode 100644 index 000000000..38613bff6 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data new file mode 100644 index 000000000..30e283714 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + 
"$dynamicAnchor": "meta", + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated new file mode 100644 index 000000000..e9e093d12 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation new file mode 100644 index 000000000..4e016ed2b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", 
+ "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema new file mode 100644 index 000000000..364f8ada6 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go new file mode 100644 index 000000000..f1494b13a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go @@ -0,0 +1,549 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" +) + +type objCompiler struct { + c *Compiler + obj map[string]any + up urlPtr + r *root + res *resource + q *queue +} + +func (c *objCompiler) compile(s *Schema) error { + // id -- + if id := c.res.dialect.draft.getID(c.obj); id != "" { + s.ID = id + } + + // anchor -- + if s.DraftVersion < 2019 { + // anchor is specified in id + id := c.string(c.res.dialect.draft.id) + if id != "" { + _, f := split(id) + if f != "" { + var err error + s.Anchor, err = decode(f) + if err != nil { + return &ParseAnchorError{URL: s.Location} + } + } + } + } else { + s.Anchor = c.string("$anchor") + 
} + + if err := c.compileDraft4(s); err != nil { + return err + } + if s.DraftVersion >= 6 { + if err := c.compileDraft6(s); err != nil { + return err + } + } + if s.DraftVersion >= 7 { + if err := c.compileDraft7(s); err != nil { + return err + } + } + if s.DraftVersion >= 2019 { + if err := c.compileDraft2019(s); err != nil { + return err + } + } + if s.DraftVersion >= 2020 { + if err := c.compileDraft2020(s); err != nil { + return err + } + } + + // vocabularies + vocabs := c.res.dialect.activeVocabs(c.c.roots.assertVocabs, c.c.roots.vocabularies) + for _, vocab := range vocabs { + v := c.c.roots.vocabularies[vocab] + if v == nil { + continue + } + ext, err := v.Compile(&CompilerContext{c}, c.obj) + if err != nil { + return err + } + if ext != nil { + s.Extensions = append(s.Extensions, ext) + } + } + + return nil +} + +func (c *objCompiler) compileDraft4(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.Ref, err = c.enqueueRef("$ref"); err != nil { + return err + } + if s.DraftVersion < 2019 && s.Ref != nil { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if c.hasVocab("applicator") { + s.AllOf = c.enqueueArr("allOf") + s.AnyOf = c.enqueueArr("anyOf") + s.OneOf = c.enqueueArr("oneOf") + s.Not = c.enqueueProp("not") + + if s.DraftVersion < 2020 { + if items, ok := c.obj["items"]; ok { + if _, ok := items.([]any); ok { + s.Items = c.enqueueArr("items") + s.AdditionalItems = c.enqueueAdditional("additionalItems") + } else { + s.Items = c.enqueueProp("items") + } + } + } + + s.Properties = c.enqueueMap("properties") + if m := c.enqueueMap("patternProperties"); m != nil { + s.PatternProperties = map[Regexp]*Schema{} + for pname, sch := range m { + re, err := c.c.roots.regexpEngine(pname) + if err != nil { + return &InvalidRegexError{c.up.format("patternProperties"), pname, err} + } + s.PatternProperties[re] = sch + } + } + s.AdditionalProperties = c.enqueueAdditional("additionalProperties") + + if m := c.objVal("dependencies"); m != nil { + s.Dependencies = map[string]any{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.Dependencies[pname] = toStrings(arr) + } else { + ptr := c.up.ptr.append2("dependencies", pname) + s.Dependencies[pname] = c.enqueuePtr(ptr) + } + } + } + } + + if c.hasVocab("validation") { + if t, ok := c.obj["type"]; ok { + s.Types = newTypes(t) + } + if arr := c.arrVal("enum"); arr != nil { + s.Enum = newEnum(arr) + } + s.MultipleOf = c.numVal("multipleOf") + s.Maximum = c.numVal("maximum") + if c.boolean("exclusiveMaximum") { + s.ExclusiveMaximum = s.Maximum + s.Maximum = nil + } else { + s.ExclusiveMaximum = c.numVal("exclusiveMaximum") + } + s.Minimum = c.numVal("minimum") + if c.boolean("exclusiveMinimum") { + s.ExclusiveMinimum = s.Minimum + s.Minimum = nil + } else { + s.ExclusiveMinimum = c.numVal("exclusiveMinimum") + } + + s.MinLength = c.intVal("minLength") + s.MaxLength = c.intVal("maxLength") + if pat := c.strVal("pattern"); pat != nil { + s.Pattern, err = c.c.roots.regexpEngine(*pat) + if err != nil { + return &InvalidRegexError{c.up.format("pattern"), *pat, err} + } + } + + s.MinItems = c.intVal("minItems") + s.MaxItems = c.intVal("maxItems") + s.UniqueItems = c.boolean("uniqueItems") + + s.MaxProperties = c.intVal("maxProperties") + s.MinProperties = c.intVal("minProperties") + if arr := c.arrVal("required"); arr != nil { + s.Required = toStrings(arr) + } + } + + // format -- + if c.assertFormat(s.DraftVersion) { + if f := c.strVal("format"); f != nil { + if *f == "regex" 
{ + s.Format = &Format{ + Name: "regex", + Validate: c.c.roots.regexpEngine.validate, + } + } else { + s.Format = c.c.formats[*f] + if s.Format == nil { + s.Format = formats[*f] + } + } + } + } + + // annotations -- + s.Title = c.string("title") + s.Description = c.string("description") + if v, ok := c.obj["default"]; ok { + s.Default = &v + } + + return nil +} + +func (c *objCompiler) compileDraft6(s *Schema) error { + if c.hasVocab("applicator") { + s.Contains = c.enqueueProp("contains") + s.PropertyNames = c.enqueueProp("propertyNames") + } + if c.hasVocab("validation") { + if v, ok := c.obj["const"]; ok { + s.Const = &v + } + } + return nil +} + +func (c *objCompiler) compileDraft7(s *Schema) error { + if c.hasVocab("applicator") { + s.If = c.enqueueProp("if") + if s.If != nil { + b := c.boolVal("if") + if b == nil || *b { + s.Then = c.enqueueProp("then") + } + if b == nil || !*b { + s.Else = c.enqueueProp("else") + } + } + } + + if c.c.assertContent { + if ce := c.strVal("contentEncoding"); ce != nil { + s.ContentEncoding = c.c.decoders[*ce] + if s.ContentEncoding == nil { + s.ContentEncoding = decoders[*ce] + } + } + if cm := c.strVal("contentMediaType"); cm != nil { + s.ContentMediaType = c.c.mediaTypes[*cm] + if s.ContentMediaType == nil { + s.ContentMediaType = mediaTypes[*cm] + } + } + } + + // annotations -- + s.Comment = c.string("$comment") + s.ReadOnly = c.boolean("readOnly") + s.WriteOnly = c.boolean("writeOnly") + if arr, ok := c.obj["examples"].([]any); ok { + s.Examples = arr + } + + return nil +} + +func (c *objCompiler) compileDraft2019(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.RecursiveRef, err = c.enqueueRef("$recursiveRef"); err != nil { + return err + } + s.RecursiveAnchor = c.boolean("$recursiveAnchor") + } + + if c.hasVocab("validation") { + if s.Contains != nil { + s.MinContains = c.intVal("minContains") + s.MaxContains = c.intVal("maxContains") + } + if m := c.objVal("dependentRequired"); m != nil { + s.DependentRequired = map[string][]string{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.DependentRequired[pname] = toStrings(arr) + } + } + } + } + + if c.hasVocab("applicator") { + s.DependentSchemas = c.enqueueMap("dependentSchemas") + } + + var unevaluated bool + if s.DraftVersion == 2019 { + unevaluated = c.hasVocab("applicator") + } else { + unevaluated = c.hasVocab("unevaluated") + } + if unevaluated { + s.UnevaluatedItems = c.enqueueProp("unevaluatedItems") + s.UnevaluatedProperties = c.enqueueProp("unevaluatedProperties") + } + + if c.c.assertContent { + if s.ContentMediaType != nil && s.ContentMediaType.UnmarshalJSON != nil { + s.ContentSchema = c.enqueueProp("contentSchema") + } + } + + // annotations -- + s.Deprecated = c.boolean("deprecated") + + return nil +} + +func (c *objCompiler) compileDraft2020(s *Schema) error { + if c.hasVocab("core") { + sch, err := c.enqueueRef("$dynamicRef") + if err != nil { + return err + } + if sch != nil { + dref := c.strVal("$dynamicRef") + _, frag, err := splitFragment(*dref) + if err != nil { + return err + } + var anch string + if anchor, ok := frag.convert().(anchor); ok { + anch = string(anchor) + } + s.DynamicRef = &DynamicRef{sch, anch} + } + s.DynamicAnchor = c.string("$dynamicAnchor") + } + + if c.hasVocab("applicator") { + s.PrefixItems = c.enqueueArr("prefixItems") + s.Items2020 = c.enqueueProp("items") + } + + return nil +} + +// enqueue helpers -- + +func (c *objCompiler) enqueuePtr(ptr jsonPointer) *Schema { + up := urlPtr{c.up.url, ptr} + return 
c.c.enqueue(c.q, up) +} + +func (c *objCompiler) enqueueRef(pname string) (*Schema, error) { + ref := c.strVal(pname) + if ref == nil { + return nil, nil + } + baseURL := c.res.id + // baseURL := c.r.baseURL(c.up.ptr) + uf, err := baseURL.join(*ref) + if err != nil { + return nil, err + } + + up, err := c.r.resolve(*uf) + if err != nil { + return nil, err + } + if up != nil { + // local ref + return c.enqueuePtr(up.ptr), nil + } + + // remote ref + up_, err := c.c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.c.enqueue(c.q, up_), nil +} + +func (c *objCompiler) enqueueProp(pname string) *Schema { + if _, ok := c.obj[pname]; !ok { + return nil + } + ptr := c.up.ptr.append(pname) + return c.enqueuePtr(ptr) +} + +func (c *objCompiler) enqueueArr(pname string) []*Schema { + arr := c.arrVal(pname) + if arr == nil { + return nil + } + sch := make([]*Schema, len(arr)) + for i := range arr { + ptr := c.up.ptr.append2(pname, strconv.Itoa(i)) + sch[i] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueMap(pname string) map[string]*Schema { + obj := c.objVal(pname) + if obj == nil { + return nil + } + sch := make(map[string]*Schema) + for k := range obj { + ptr := c.up.ptr.append2(pname, k) + sch[k] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueAdditional(pname string) any { + if b := c.boolVal(pname); b != nil { + return *b + } + if sch := c.enqueueProp(pname); sch != nil { + return sch + } + return nil +} + +// -- + +func (c *objCompiler) hasVocab(name string) bool { + return c.res.dialect.hasVocab(name) +} + +func (c *objCompiler) assertFormat(draftVersion int) bool { + if c.c.assertFormat || draftVersion < 2019 { + return true + } + if draftVersion == 2019 { + return c.hasVocab("format") + } else { + return c.hasVocab("format-assertion") + } +} + +// value helpers -- + +func (c *objCompiler) boolVal(pname string) *bool { + v, ok := c.obj[pname] + if !ok { + return nil + } + b, ok := v.(bool) + if !ok { + return nil + } + return &b +} + +func (c *objCompiler) boolean(pname string) bool { + b := c.boolVal(pname) + return b != nil && *b +} + +func (c *objCompiler) strVal(pname string) *string { + v, ok := c.obj[pname] + if !ok { + return nil + } + s, ok := v.(string) + if !ok { + return nil + } + return &s +} + +func (c *objCompiler) string(pname string) string { + if s := c.strVal(pname); s != nil { + return *s + } + return "" +} + +func (c *objCompiler) numVal(pname string) *big.Rat { + v, ok := c.obj[pname] + if !ok { + return nil + } + switch v.(type) { + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if n, ok := new(big.Rat).SetString(fmt.Sprint(v)); ok { + return n + } + } + return nil +} + +func (c *objCompiler) intVal(pname string) *int { + if n := c.numVal(pname); n != nil && n.IsInt() { + n := int(n.Num().Int64()) + return &n + } + return nil +} + +func (c *objCompiler) objVal(pname string) map[string]any { + v, ok := c.obj[pname] + if !ok { + return nil + } + obj, ok := v.(map[string]any) + if !ok { + return nil + } + return obj +} + +func (c *objCompiler) arrVal(pname string) []any { + v, ok := c.obj[pname] + if !ok { + return nil + } + arr, ok := v.([]any) + if !ok { + return nil + } + return arr +} + +// -- + +type InvalidRegexError struct { + URL string + Regex string + Err error +} + +func (e *InvalidRegexError) Error() string { + return fmt.Sprintf("invalid regex %q at %q: %v", e.Regex, e.URL, e.Err) +} + +// -- + +func toStrings(arr []any) 
[]string { + var strings []string + for _, item := range arr { + if s, ok := item.(string); ok { + strings = append(strings, s) + } + } + return strings +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go new file mode 100644 index 000000000..4995d7b8b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go @@ -0,0 +1,212 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +var defaultPrinter = message.NewPrinter(language.English) + +// format --- + +func (e *ValidationError) schemaURL() string { + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + return ref.URL + } else { + return e.SchemaURL + } +} + +func (e *ValidationError) absoluteKeywordLocation() string { + var schemaURL string + var keywordPath []string + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + schemaURL = ref.URL + keywordPath = nil + } else { + schemaURL = e.SchemaURL + keywordPath = e.ErrorKind.KeywordPath() + } + return fmt.Sprintf("%s%s", schemaURL, encode(jsonPtr(keywordPath))) +} + +func (e *ValidationError) skip() bool { + if len(e.Causes) == 1 { + _, ok := e.ErrorKind.(*kind.Reference) + return ok + } + return false +} + +func (e *ValidationError) display(sb *strings.Builder, verbose bool, indent int, absKwLoc string, p *message.Printer) { + if !e.skip() { + if indent > 0 { + sb.WriteByte('\n') + for i := 0; i < indent-1; i++ { + sb.WriteString(" ") + } + sb.WriteString("- ") + } + indent = indent + 1 + + prevAbsKwLoc := absKwLoc + absKwLoc = e.absoluteKeywordLocation() + + if _, ok := e.ErrorKind.(*kind.Schema); ok { + sb.WriteString(e.ErrorKind.LocalizedString(p)) + } else { + sb.WriteString(p.Sprintf("at %s", quote(jsonPtr(e.InstanceLocation)))) + if verbose { + schLoc := absKwLoc + if prevAbsKwLoc != "" { + pu, _ := split(prevAbsKwLoc) + u, f := split(absKwLoc) + if u == pu { + schLoc = fmt.Sprintf("S#%s", f) + } + } + fmt.Fprintf(sb, " [%s]", schLoc) + } + fmt.Fprintf(sb, ": %s", e.ErrorKind.LocalizedString(p)) + } + } + for _, cause := range e.Causes { + cause.display(sb, verbose, indent, absKwLoc, p) + } +} + +func (e *ValidationError) Error() string { + return e.LocalizedError(defaultPrinter) +} + +func (e *ValidationError) LocalizedError(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, false, 0, "", p) + return sb.String() +} + +func (e *ValidationError) GoString() string { + return e.LocalizedGoString(defaultPrinter) +} + +func (e *ValidationError) LocalizedGoString(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, true, 0, "", p) + return sb.String() +} + +func jsonPtr(tokens []string) string { + var sb strings.Builder + for _, tok := range tokens { + sb.WriteByte('/') + sb.WriteString(escape(tok)) + } + return sb.String() +} + +// -- + +// Flag is output format with simple boolean property valid. +type FlagOutput struct { + Valid bool `json:"valid"` +} + +// The `Flag` output format, merely the boolean result. 
+func (e *ValidationError) FlagOutput() *FlagOutput { + return &FlagOutput{Valid: false} +} + +// -- + +type OutputUnit struct { + Valid bool `json:"valid"` + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"AbsoluteKeywordLocation,omitempty"` + InstanceLocation string `json:"instanceLocation"` + Error *OutputError `json:"error,omitempty"` + Errors []OutputUnit `json:"errors,omitempty"` +} + +type OutputError struct { + Kind ErrorKind + p *message.Printer +} + +func (k OutputError) MarshalJSON() ([]byte, error) { + return json.Marshal(k.Kind.LocalizedString(k.p)) +} + +// The `Basic` structure, a flat list of output units. +func (e *ValidationError) BasicOutput() *OutputUnit { + return e.LocalizedBasicOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedBasicOutput(p *message.Printer) *OutputUnit { + out := e.output(true, false, "", "", p) + return &out +} + +// The `Detailed` structure, based on the schema. +func (e *ValidationError) DetailedOutput() *OutputUnit { + return e.LocalizedDetailedOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedDetailedOutput(p *message.Printer) *OutputUnit { + out := e.output(false, false, "", "", p) + return &out +} + +func (e *ValidationError) output(flatten, inRef bool, schemaURL, kwLoc string, p *message.Printer) OutputUnit { + if !inRef { + if _, ok := e.ErrorKind.(*kind.Reference); ok { + inRef = true + } + } + if schemaURL != "" { + kwLoc += e.SchemaURL[len(schemaURL):] + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + kwLoc += jsonPtr(ref.KeywordPath()) + } + } + schemaURL = e.schemaURL() + + keywordLocation := kwLoc + if _, ok := e.ErrorKind.(*kind.Reference); !ok { + keywordLocation += jsonPtr(e.ErrorKind.KeywordPath()) + } + + out := OutputUnit{ + Valid: false, + InstanceLocation: jsonPtr(e.InstanceLocation), + KeywordLocation: keywordLocation, + } + if inRef { + out.AbsoluteKeywordLocation = e.absoluteKeywordLocation() + } + for _, cause := range e.Causes { + causeOut := cause.output(flatten, inRef, schemaURL, kwLoc, p) + if cause.skip() { + causeOut = causeOut.Errors[0] + } + if flatten { + errors := causeOut.Errors + causeOut.Errors = nil + causeOut.Error = &OutputError{cause.ErrorKind, p} + out.Errors = append(out.Errors, causeOut) + if len(errors) > 0 { + out.Errors = append(out.Errors, errors...) + } + } else { + out.Errors = append(out.Errors, causeOut) + } + } + if len(out.Errors) == 0 { + out.Error = &OutputError{e.ErrorKind, p} + } + return out +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go new file mode 100644 index 000000000..576a2a47f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go @@ -0,0 +1,142 @@ +package jsonschema + +import ( + "strconv" + "strings" +) + +// Position tells possible tokens in json. 
+type Position interface { + collect(v any, ptr jsonPointer) map[jsonPointer]any +} + +// -- + +type AllProp struct{} + +func (AllProp) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for pname, pvalue := range obj { + m[ptr.append(pname)] = pvalue + } + return m +} + +// -- + +type AllItem struct{} + +func (AllItem) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for i, item := range arr { + m[ptr.append(strconv.Itoa(i))] = item + } + return m +} + +// -- + +type Prop string + +func (p Prop) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + pvalue, ok := obj[string(p)] + if !ok { + return nil + } + return map[jsonPointer]any{ + ptr.append(string(p)): pvalue, + } +} + +// -- + +type Item int + +func (i Item) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + if i < 0 || int(i) >= len(arr) { + return nil + } + return map[jsonPointer]any{ + ptr.append(strconv.Itoa(int(i))): arr[int(i)], + } +} + +// -- + +// SchemaPath tells where to look for subschema inside keyword. +type SchemaPath []Position + +func schemaPath(path string) SchemaPath { + var sp SchemaPath + for _, tok := range strings.Split(path, "/") { + var pos Position + switch tok { + case "*": + pos = AllProp{} + case "[]": + pos = AllItem{} + default: + if i, err := strconv.Atoi(tok); err == nil { + pos = Item(i) + } else { + pos = Prop(tok) + } + } + sp = append(sp, pos) + } + return sp +} + +func (sp SchemaPath) collect(v any, ptr jsonPointer) map[jsonPointer]any { + if len(sp) == 0 { + return map[jsonPointer]any{ + ptr: v, + } + } + p, sp := sp[0], sp[1:] + m := p.collect(v, ptr) + mm := map[jsonPointer]any{} + for ptr, v := range m { + m = sp.collect(v, ptr) + for k, v := range m { + mm[k] = v + } + } + return mm +} + +func (sp SchemaPath) String() string { + var sb strings.Builder + for _, pos := range sp { + if sb.Len() != 0 { + sb.WriteByte('/') + } + switch pos := pos.(type) { + case AllProp: + sb.WriteString("*") + case AllItem: + sb.WriteString("[]") + case Prop: + sb.WriteString(string(pos)) + case Item: + sb.WriteString(strconv.Itoa(int(pos))) + } + } + return sb.String() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go new file mode 100644 index 000000000..860690102 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go @@ -0,0 +1,202 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +type root struct { + url url + doc any + resources map[jsonPointer]*resource + subschemasProcessed map[jsonPointer]struct{} +} + +func (r *root) rootResource() *resource { + return r.resources[""] +} + +func (r *root) resource(ptr jsonPointer) *resource { + for { + if res, ok := r.resources[ptr]; ok { + return res + } + slash := strings.LastIndexByte(string(ptr), '/') + if slash == -1 { + break + } + ptr = ptr[:slash] + } + return r.rootResource() +} + +func (r *root) resolveFragmentIn(frag fragment, res *resource) (urlPtr, error) { + var ptr jsonPointer + switch f := frag.convert().(type) { + case jsonPointer: + ptr = res.ptr.concat(f) + case anchor: + aptr, ok := res.anchors[f] + if !ok { + return urlPtr{}, &AnchorNotFoundError{ + URL: r.url.String(), + Reference: (&urlFrag{res.id, frag}).String(), + } + } + ptr = 
aptr + } + return urlPtr{r.url, ptr}, nil +} + +func (r *root) resolveFragment(frag fragment) (urlPtr, error) { + return r.resolveFragmentIn(frag, r.rootResource()) +} + +// resovles urlFrag to urlPtr from root. +// returns nil if it is external. +func (r *root) resolve(uf urlFrag) (*urlPtr, error) { + var res *resource + if uf.url == r.url { + res = r.rootResource() + } else { + // look for resource with id==uf.url + for _, v := range r.resources { + if v.id == uf.url { + res = v + break + } + } + if res == nil { + return nil, nil // external url + } + } + up, err := r.resolveFragmentIn(uf.frag, res) + return &up, err +} + +func (r *root) collectAnchors(sch any, schPtr jsonPointer, res *resource) error { + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + addAnchor := func(anchor anchor) error { + ptr1, ok := res.anchors[anchor] + if ok { + if ptr1 == schPtr { + // anchor with same root_ptr already exists + return nil + } + return &DuplicateAnchorError{ + string(anchor), r.url.String(), string(ptr1), string(schPtr), + } + } + res.anchors[anchor] = schPtr + return nil + } + + if res.dialect.draft.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return nil + } + // anchor is specified in id + if id, ok := strVal(obj, res.dialect.draft.id); ok { + _, frag, err := splitFragment(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseAnchorError{loc.String()} + } + if anchor, ok := frag.convert().(anchor); ok { + if err := addAnchor(anchor); err != nil { + return err + } + } + } + } + if res.dialect.draft.version >= 2019 { + if s, ok := strVal(obj, "$anchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + } + } + if res.dialect.draft.version >= 2020 { + if s, ok := strVal(obj, "$dynamicAnchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + res.dynamicAnchors = append(res.dynamicAnchors, anchor(s)) + } + } + + return nil +} + +func (r *root) clone() *root { + processed := map[jsonPointer]struct{}{} + for k := range r.subschemasProcessed { + processed[k] = struct{}{} + } + resources := map[jsonPointer]*resource{} + for k, v := range r.resources { + resources[k] = v.clone() + } + return &root{ + url: r.url, + doc: r.doc, + resources: resources, + subschemasProcessed: processed, + } +} + +// -- + +type resource struct { + ptr jsonPointer + id url + dialect dialect + anchors map[anchor]jsonPointer + dynamicAnchors []anchor +} + +func newResource(ptr jsonPointer, id url) *resource { + return &resource{ptr: ptr, id: id, anchors: make(map[anchor]jsonPointer)} +} + +func (res *resource) clone() *resource { + anchors := map[anchor]jsonPointer{} + for k, v := range res.anchors { + anchors[k] = v + } + return &resource{ + ptr: res.ptr, + id: res.id, + dialect: res.dialect, + anchors: anchors, + dynamicAnchors: slices.Clone(res.dynamicAnchors), + } +} + +//-- + +type UnsupportedVocabularyError struct { + URL string + Vocabulary string +} + +func (e *UnsupportedVocabularyError) Error() string { + return fmt.Sprintf("unsupported vocabulary %q in %q", e.Vocabulary, e.URL) +} + +// -- + +type AnchorNotFoundError struct { + URL string + Reference string +} + +func (e *AnchorNotFoundError) Error() string { + return fmt.Sprintf("anchor in %q not found in schema %q", e.Reference, e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go new file mode 100644 index 000000000..b9b79baa3 --- /dev/null 
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go @@ -0,0 +1,289 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +type roots struct { + defaultDraft *Draft + roots map[url]*root + loader defaultLoader + regexpEngine RegexpEngine + vocabularies map[string]*Vocabulary + assertVocabs bool +} + +func newRoots() *roots { + return &roots{ + defaultDraft: draftLatest, + roots: map[url]*root{}, + loader: defaultLoader{ + docs: map[url]any{}, + loader: FileLoader{}, + }, + regexpEngine: goRegexpCompile, + vocabularies: map[string]*Vocabulary{}, + } +} + +func (rr *roots) orLoad(u url) (*root, error) { + if r, ok := rr.roots[u]; ok { + return r, nil + } + doc, err := rr.loader.load(u) + if err != nil { + return nil, err + } + return rr.addRoot(u, doc) +} + +func (rr *roots) addRoot(u url, doc any) (*root, error) { + r := &root{ + url: u, + doc: doc, + resources: map[jsonPointer]*resource{}, + subschemasProcessed: map[jsonPointer]struct{}{}, + } + if err := rr.collectResources(r, doc, u, "", dialect{rr.defaultDraft, nil}); err != nil { + return nil, err + } + if !strings.HasPrefix(u.String(), "http://json-schema.org/") && + !strings.HasPrefix(u.String(), "https://json-schema.org/") { + if err := rr.validate(r, doc, ""); err != nil { + return nil, err + } + } + + rr.roots[u] = r + return r, nil +} + +func (rr *roots) resolveFragment(uf urlFrag) (urlPtr, error) { + r, err := rr.orLoad(uf.url) + if err != nil { + return urlPtr{}, err + } + return r.resolveFragment(uf.frag) +} + +func (rr *roots) collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := r.subschemasProcessed[schPtr]; ok { + return nil + } + if err := rr._collectResources(r, sch, base, schPtr, fallback); err != nil { + return err + } + r.subschemasProcessed[schPtr] = struct{}{} + return nil +} + +func (rr *roots) _collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := sch.(bool); ok { + if schPtr.isEmpty() { + // root resource + res := newResource(schPtr, base) + res.dialect = fallback + r.resources[schPtr] = res + } + return nil + } + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + hasSchema := false + if sch, ok := obj["$schema"]; ok { + if _, ok := sch.(string); ok { + hasSchema = true + } + } + + draft, err := rr.loader.getDraft(urlPtr{r.url, schPtr}, sch, fallback.draft, map[url]struct{}{}) + if err != nil { + return err + } + id := draft.getID(obj) + if id == "" && !schPtr.isEmpty() { + // ignore $schema + draft = fallback.draft + hasSchema = false + id = draft.getID(obj) + } + + var res *resource + if id != "" { + uf, err := base.join(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseIDError{loc.String()} + } + base = uf.url + res = newResource(schPtr, base) + } else if schPtr.isEmpty() { + // root resource + res = newResource(schPtr, base) + } + + if res != nil { + found := false + for _, res := range r.resources { + if res.id == base { + found = true + if res.ptr != schPtr { + return &DuplicateIDError{base.String(), r.url.String(), string(schPtr), string(res.ptr)} + } + } + } + if !found { + if hasSchema { + vocabs, err := rr.loader.getMetaVocabs(sch, draft, rr.vocabularies) + if err != nil { + return err + } + res.dialect = dialect{draft, vocabs} + } else { + res.dialect = fallback + } + r.resources[schPtr] = res + } + } + + var baseRes *resource + for _, res := range r.resources { + if res.id == base { + baseRes = res + break + } + } + if baseRes == nil { + panic("baseres is 
nil") + } + + // found base resource + if err := r.collectAnchors(sch, schPtr, baseRes); err != nil { + return err + } + + // process subschemas + subschemas := map[jsonPointer]any{} + for _, sp := range draft.subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + for _, vocab := range baseRes.dialect.activeVocabs(true, rr.vocabularies) { + if v := rr.vocabularies[vocab]; v != nil { + for _, sp := range v.Subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + } + } + for ptr, v := range subschemas { + if err := rr.collectResources(r, v, base, ptr, baseRes.dialect); err != nil { + return err + } + } + + return nil +} + +func (rr *roots) ensureSubschema(up urlPtr) error { + r, err := rr.orLoad(up.url) + if err != nil { + return err + } + if _, ok := r.subschemasProcessed[up.ptr]; ok { + return nil + } + v, err := up.lookup(r.doc) + if err != nil { + return err + } + rClone := r.clone() + if err := rr.addSubschema(rClone, up.ptr); err != nil { + return err + } + if err := rr.validate(rClone, v, up.ptr); err != nil { + return err + } + rr.roots[r.url] = rClone + return nil +} + +func (rr *roots) addSubschema(r *root, ptr jsonPointer) error { + v, err := (&urlPtr{r.url, ptr}).lookup(r.doc) + if err != nil { + return err + } + base := r.resource(ptr) + baseURL := base.id + if err := rr.collectResources(r, v, baseURL, ptr, base.dialect); err != nil { + return err + } + + // collect anchors + if _, ok := r.resources[ptr]; !ok { + res := r.resource(ptr) + if err := r.collectAnchors(v, ptr, res); err != nil { + return err + } + } + return nil +} + +func (rr *roots) validate(r *root, v any, ptr jsonPointer) error { + dialect := r.resource(ptr).dialect + meta := dialect.getSchema(rr.assertVocabs, rr.vocabularies) + if err := meta.validate(v, rr.regexpEngine, meta, r.resources, rr.assertVocabs, rr.vocabularies); err != nil { + up := urlPtr{r.url, ptr} + return &SchemaValidationError{URL: up.String(), Err: err} + } + return nil +} + +// -- + +type InvalidMetaSchemaURLError struct { + URL string + Err error +} + +func (e *InvalidMetaSchemaURLError) Error() string { + return fmt.Sprintf("invalid $schema in %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedDraftError struct { + URL string +} + +func (e *UnsupportedDraftError) Error() string { + return fmt.Sprintf("draft %q is not supported", e.URL) +} + +// -- + +type MetaSchemaCycleError struct { + URL string +} + +func (e *MetaSchemaCycleError) Error() string { + return fmt.Sprintf("cycle in resolving $schema in %q", e.URL) +} + +// -- + +type MetaSchemaMismatchError struct { + URL string +} + +func (e *MetaSchemaMismatchError) Error() string { + return fmt.Sprintf("$schema in %q does not match with $schema in root", e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go new file mode 100644 index 000000000..a970311fb --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go @@ -0,0 +1,248 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Schema is the regpresentation of a compiled +// jsonschema. 
+type Schema struct { + up urlPtr + resource *Schema + dynamicAnchors map[string]*Schema + allPropsEvaluated bool + allItemsEvaluated bool + numItemsEvaluated int + + DraftVersion int + Location string + + // type agnostic -- + Bool *bool // boolean schema + ID string + Ref *Schema + Anchor string + RecursiveRef *Schema + RecursiveAnchor bool + DynamicRef *DynamicRef + DynamicAnchor string // "" if not specified + Types *Types + Enum *Enum + Const *any + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema + Else *Schema + Format *Format + + // object -- + MaxProperties *int + MinProperties *int + Required []string + PropertyNames *Schema + Properties map[string]*Schema + PatternProperties map[Regexp]*Schema + AdditionalProperties any // nil or bool or *Schema + Dependencies map[string]any // value is []string or *Schema + DependentRequired map[string][]string + DependentSchemas map[string]*Schema + UnevaluatedProperties *Schema + + // array -- + MinItems *int + MaxItems *int + UniqueItems bool + Contains *Schema + MinContains *int + MaxContains *int + Items any // nil or []*Schema or *Schema + AdditionalItems any // nil or bool or *Schema + PrefixItems []*Schema + Items2020 *Schema + UnevaluatedItems *Schema + + // string -- + MinLength *int + MaxLength *int + Pattern Regexp + ContentEncoding *Decoder + ContentMediaType *MediaType + ContentSchema *Schema + + // number -- + Maximum *big.Rat + Minimum *big.Rat + ExclusiveMaximum *big.Rat + ExclusiveMinimum *big.Rat + MultipleOf *big.Rat + + Extensions []SchemaExt + + // annotations -- + Title string + Description string + Default *any + Comment string + ReadOnly bool + WriteOnly bool + Examples []any + Deprecated bool +} + +// -- + +type jsonType int + +const ( + invalidType jsonType = 0 + nullType jsonType = 1 << iota + booleanType + numberType + integerType + stringType + arrayType + objectType +) + +func typeOf(v any) jsonType { + switch v.(type) { + case nil: + return nullType + case bool: + return booleanType + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return numberType + case string: + return stringType + case []any: + return arrayType + case map[string]any: + return objectType + default: + return invalidType + } +} + +func typeFromString(s string) jsonType { + switch s { + case "null": + return nullType + case "boolean": + return booleanType + case "number": + return numberType + case "integer": + return integerType + case "string": + return stringType + case "array": + return arrayType + case "object": + return objectType + } + return invalidType +} + +func (jt jsonType) String() string { + switch jt { + case nullType: + return "null" + case booleanType: + return "boolean" + case numberType: + return "number" + case integerType: + return "integer" + case stringType: + return "string" + case arrayType: + return "array" + case objectType: + return "object" + } + return "" +} + +// -- + +// Types encapsulates list of json value types. 
+type Types int + +func newTypes(v any) *Types { + var types Types + switch v := v.(type) { + case string: + types.add(typeFromString(v)) + case []any: + for _, item := range v { + if s, ok := item.(string); ok { + types.add(typeFromString(s)) + } + } + } + if types.IsEmpty() { + return nil + } + return &types +} + +func (tt Types) IsEmpty() bool { + return tt == 0 +} + +func (tt *Types) add(t jsonType) { + *tt = Types(int(*tt) | int(t)) +} + +func (tt Types) contains(t jsonType) bool { + return int(tt)&int(t) != 0 +} + +func (tt Types) ToStrings() []string { + types := []jsonType{ + nullType, booleanType, numberType, integerType, + stringType, arrayType, objectType, + } + var arr []string + for _, t := range types { + if tt.contains(t) { + arr = append(arr, t.String()) + } + } + return arr +} + +func (tt Types) String() string { + return fmt.Sprintf("%v", tt.ToStrings()) +} + +// -- + +type Enum struct { + Values []any + types Types +} + +func newEnum(arr []any) *Enum { + var types Types + for _, item := range arr { + types.add(typeOf(item)) + } + return &Enum{arr, types} +} + +// -- + +type DynamicRef struct { + Ref *Schema + Anchor string // "" if not specified +} + +func newSchema(up urlPtr) *Schema { + return &Schema{up: up, Location: up.String()} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go new file mode 100644 index 000000000..c6f8e7752 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go @@ -0,0 +1,464 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "math/big" + gourl "net/url" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +// -- + +type url (string) + +func (u url) String() string { + return string(u) +} + +func (u url) join(ref string) (*urlFrag, error) { + base, err := gourl.Parse(string(u)) + if err != nil { + return nil, &ParseURLError{URL: u.String(), Err: err} + } + + ref, frag, err := splitFragment(ref) + if err != nil { + return nil, err + } + refURL, err := gourl.Parse(ref) + if err != nil { + return nil, &ParseURLError{URL: ref, Err: err} + } + resolved := base.ResolveReference(refURL) + + // see https://github.com/golang/go/issues/66084 (net/url: ResolveReference ignores Opaque value) + if !refURL.IsAbs() && base.Opaque != "" { + resolved.Opaque = base.Opaque + } + + return &urlFrag{url: url(resolved.String()), frag: frag}, nil +} + +// -- + +type jsonPointer string + +func escape(tok string) string { + tok = strings.ReplaceAll(tok, "~", "~0") + tok = strings.ReplaceAll(tok, "/", "~1") + return tok +} + +func unescape(tok string) (string, bool) { + tilde := strings.IndexByte(tok, '~') + if tilde == -1 { + return tok, true + } + sb := new(strings.Builder) + for { + sb.WriteString(tok[:tilde]) + tok = tok[tilde+1:] + if tok == "" { + return "", false + } + switch tok[0] { + case '0': + sb.WriteByte('~') + case '1': + sb.WriteByte('/') + default: + return "", false + } + tok = tok[1:] + tilde = strings.IndexByte(tok, '~') + if tilde == -1 { + sb.WriteString(tok) + break + } + } + return sb.String(), true +} + +func (ptr jsonPointer) isEmpty() bool { + return string(ptr) == "" +} + +func (ptr jsonPointer) concat(next jsonPointer) jsonPointer { + return jsonPointer(fmt.Sprintf("%s%s", ptr, next)) +} + +func (ptr jsonPointer) append(tok string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s", ptr, escape(tok))) +} + +func (ptr 
jsonPointer) append2(tok1, tok2 string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s/%s", ptr, escape(tok1), escape(tok2))) +} + +// -- + +type anchor string + +// -- + +type fragment string + +func decode(frag string) (string, error) { + return gourl.PathUnescape(frag) +} + +// avoids escaping /. +func encode(frag string) string { + var sb strings.Builder + for i, tok := range strings.Split(frag, "/") { + if i > 0 { + sb.WriteByte('/') + } + sb.WriteString(gourl.PathEscape(tok)) + } + return sb.String() +} + +func splitFragment(str string) (string, fragment, error) { + u, f := split(str) + f, err := decode(f) + if err != nil { + return "", fragment(""), &ParseURLError{URL: str, Err: err} + } + return u, fragment(f), nil +} + +func split(str string) (string, string) { + hash := strings.IndexByte(str, '#') + if hash == -1 { + return str, "" + } + return str[:hash], str[hash+1:] +} + +func (frag fragment) convert() any { + str := string(frag) + if str == "" || strings.HasPrefix(str, "/") { + return jsonPointer(str) + } + return anchor(str) +} + +// -- + +type urlFrag struct { + url url + frag fragment +} + +func startsWithWindowsDrive(s string) bool { + if s != "" && strings.HasPrefix(s[1:], `:\`) { + return (s[0] >= 'a' && s[0] <= 'z') || (s[0] >= 'A' && s[0] <= 'Z') + } + return false +} + +func absolute(input string) (*urlFrag, error) { + u, frag, err := splitFragment(input) + if err != nil { + return nil, err + } + + // if windows absolute file path, convert to file url + // because: net/url parses driver name as scheme + if runtime.GOOS == "windows" && startsWithWindowsDrive(u) { + u = "file:///" + filepath.ToSlash(u) + } + + gourl, err := gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + if gourl.IsAbs() { + return &urlFrag{url(u), frag}, nil + } + + // avoid filesystem api in wasm + if runtime.GOOS != "js" { + abs, err := filepath.Abs(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + u = abs + } + if !strings.HasPrefix(u, "/") { + u = "/" + u + } + u = "file://" + filepath.ToSlash(u) + + _, err = gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + return &urlFrag{url: url(u), frag: frag}, nil +} + +func (uf *urlFrag) String() string { + return fmt.Sprintf("%s#%s", uf.url, encode(string(uf.frag))) +} + +// -- + +type urlPtr struct { + url url + ptr jsonPointer +} + +func (up *urlPtr) lookup(v any) (any, error) { + for _, tok := range strings.Split(string(up.ptr), "/")[1:] { + tok, ok := unescape(tok) + if !ok { + return nil, &InvalidJsonPointerError{up.String()} + } + switch val := v.(type) { + case map[string]any: + if pvalue, ok := val[tok]; ok { + v = pvalue + continue + } + case []any: + if index, err := strconv.Atoi(tok); err == nil { + if index >= 0 && index < len(val) { + v = val[index] + continue + } + } + } + return nil, &JSONPointerNotFoundError{up.String()} + } + return v, nil +} + +func (up *urlPtr) format(tok string) string { + return fmt.Sprintf("%s#%s/%s", up.url, encode(string(up.ptr)), encode(escape(tok))) +} + +func (up *urlPtr) String() string { + return fmt.Sprintf("%s#%s", up.url, encode(string(up.ptr))) +} + +// -- + +func minInt(i, j int) int { + if i < j { + return i + } + return j +} + +func strVal(obj map[string]any, prop string) (string, bool) { + v, ok := obj[prop] + if !ok { + return "", false + } + s, ok := v.(string) + return s, ok +} + +func isInteger(num any) bool { + rat, ok := new(big.Rat).SetString(fmt.Sprint(num)) + return ok && 
rat.IsInt() +} + +// quote returns single-quoted string. +// used for embedding quoted strings in json. +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func equals(v1, v2 any) (bool, ErrorKind) { + switch v1 := v1.(type) { + case map[string]any: + v2, ok := v2.(map[string]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for k, val1 := range v1 { + val2, ok := v2[k] + if !ok { + return false, nil + } + if ok, k := equals(val1, val2); !ok || k != nil { + return ok, k + } + } + return true, nil + case []any: + v2, ok := v2.([]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for i := range v1 { + if ok, k := equals(v1[i], v2[i]); !ok || k != nil { + return ok, k + } + } + return true, nil + case nil: + return v2 == nil, nil + case bool: + v2, ok := v2.(bool) + return ok && v1 == v2, nil + case string: + v2, ok := v2.(string) + return ok && v1 == v2, nil + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num1, ok1 := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, ok2 := new(big.Rat).SetString(fmt.Sprint(v2)) + return ok1 && ok2 && num1.Cmp(num2) == 0, nil + default: + return false, &kind.InvalidJsonValue{Value: v1} + } +} + +func duplicates(arr []any) (int, int, ErrorKind) { + if len(arr) <= 20 { + for i := 1; i < len(arr); i++ { + for j := 0; j < i; j++ { + if ok, k := equals(arr[i], arr[j]); ok || k != nil { + return j, i, k + } + } + } + return -1, -1, nil + } + + m := make(map[uint64][]int) + h := new(maphash.Hash) + for i, item := range arr { + h.Reset() + writeHash(item, h) + hash := h.Sum64() + indexes, ok := m[hash] + if ok { + for _, j := range indexes { + if ok, k := equals(item, arr[j]); ok || k != nil { + return j, i, k + } + } + } + indexes = append(indexes, i) + m[hash] = indexes + } + return -1, -1, nil +} + +func writeHash(v any, h *maphash.Hash) ErrorKind { + switch v := v.(type) { + case map[string]any: + _ = h.WriteByte(0) + props := make([]string, 0, len(v)) + for prop := range v { + props = append(props, prop) + } + slices.Sort(props) + for _, prop := range props { + writeHash(prop, h) + writeHash(v[prop], h) + } + case []any: + _ = h.WriteByte(1) + for _, item := range v { + writeHash(item, h) + } + case nil: + _ = h.WriteByte(2) + case bool: + _ = h.WriteByte(3) + if v { + _ = h.WriteByte(1) + } else { + _ = h.WriteByte(0) + } + case string: + _ = h.WriteByte(4) + _, _ = h.WriteString(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + _ = h.WriteByte(5) + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + _, _ = h.Write(num.Num().Bytes()) + _, _ = h.Write(num.Denom().Bytes()) + default: + return &kind.InvalidJsonValue{Value: v} + } + return nil +} + +// -- + +type ParseURLError struct { + URL string + Err error +} + +func (e *ParseURLError) Error() string { + return fmt.Sprintf("error in parsing %q: %v", e.URL, e.Err) +} + +// -- + +type InvalidJsonPointerError struct { + URL string +} + +func (e *InvalidJsonPointerError) Error() string { + return fmt.Sprintf("invalid json-pointer %q", e.URL) +} + +// -- + +type JSONPointerNotFoundError struct { + URL string +} + +func (e *JSONPointerNotFoundError) Error() string { + return fmt.Sprintf("json-pointer in %q not found", e.URL) +} + +// -- + +type SchemaValidationError struct { + URL string + Err error +} + +func (e *SchemaValidationError) Error() string 
{ + return fmt.Sprintf("%q is not valid against metaschema: %v", e.URL, e.Err) +} + +// -- + +// LocalizableError is an error whose message is localizable. +func LocalizableError(format string, args ...any) error { + return &localizableError{format, args} +} + +type localizableError struct { + msg string + args []any +} + +func (e *localizableError) Error() string { + return fmt.Sprintf(e.msg, e.args...) +} + +func (e *localizableError) LocalizedError(p *message.Printer) string { + return p.Sprintf(e.msg, e.args...) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go new file mode 100644 index 000000000..e2ace37a9 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go @@ -0,0 +1,975 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "slices" + "strconv" + "unicode/utf8" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +func (sch *Schema) Validate(v any) error { + return sch.validate(v, nil, nil, nil, false, nil) +} + +func (sch *Schema) validate(v any, regexpEngine RegexpEngine, meta *Schema, resources map[jsonPointer]*resource, assertVocabs bool, vocabularies map[string]*Vocabulary) error { + vd := validator{ + v: v, + vloc: make([]string, 0, 8), + sch: sch, + scp: &scope{sch, "", 0, nil}, + uneval: unevalFrom(v, sch, false), + errors: nil, + boolResult: false, + regexpEngine: regexpEngine, + meta: meta, + resources: resources, + assertVocabs: assertVocabs, + vocabularies: vocabularies, + } + if _, err := vd.validate(); err != nil { + verr := err.(*ValidationError) + var causes []*ValidationError + if _, ok := verr.ErrorKind.(*kind.Group); ok { + causes = verr.Causes + } else { + causes = []*ValidationError{verr} + } + return &ValidationError{ + SchemaURL: sch.Location, + InstanceLocation: nil, + ErrorKind: &kind.Schema{Location: sch.Location}, + Causes: causes, + } + } + + return nil +} + +type validator struct { + v any + vloc []string + sch *Schema + scp *scope + uneval *uneval + errors []*ValidationError + boolResult bool // is interested to know valid or not (but not actuall error) + regexpEngine RegexpEngine + + // meta validation + meta *Schema // set only when validating with metaschema + resources map[jsonPointer]*resource // resources which should be validated with their dialect + assertVocabs bool + vocabularies map[string]*Vocabulary +} + +func (vd *validator) validate() (*uneval, error) { + s := vd.sch + v := vd.v + + // boolean -- + if s.Bool != nil { + if *s.Bool { + return vd.uneval, nil + } else { + return nil, vd.error(&kind.FalseSchema{}) + } + } + + // check cycle -- + if scp := vd.scp.checkCycle(); scp != nil { + return nil, vd.error(&kind.RefCycle{ + URL: s.Location, + KeywordLocation1: vd.scp.kwLoc(), + KeywordLocation2: scp.kwLoc(), + }) + } + + t := typeOf(v) + if t == invalidType { + return nil, vd.error(&kind.InvalidJsonValue{Value: v}) + } + + // type -- + if s.Types != nil && !s.Types.IsEmpty() { + matched := s.Types.contains(t) || (s.Types.contains(integerType) && t == numberType && isInteger(v)) + if !matched { + return nil, vd.error(&kind.Type{Got: t.String(), Want: s.Types.ToStrings()}) + } + } + + // const -- + if s.Const != nil { + ok, k := equals(v, *s.Const) + if k != nil { + return nil, vd.error(k) + } else if !ok { + return nil, vd.error(&kind.Const{Got: v, Want: *s.Const}) + } + } + + // enum -- + if s.Enum != nil { + matched := s.Enum.types.contains(typeOf(v)) + if matched { 
+ matched = false + for _, item := range s.Enum.Values { + ok, k := equals(v, item) + if k != nil { + return nil, vd.error(k) + } else if ok { + matched = true + break + } + } + } + if !matched { + return nil, vd.error(&kind.Enum{Got: v, Want: s.Enum.Values}) + } + } + + // format -- + if s.Format != nil { + var err error + if s.Format.Name == "regex" && vd.regexpEngine != nil { + err = vd.regexpEngine.validate(v) + } else { + err = s.Format.Validate(v) + } + if err != nil { + return nil, vd.error(&kind.Format{Got: v, Want: s.Format.Name, Err: err}) + } + } + + // $ref -- + if s.Ref != nil { + err := vd.validateRef(s.Ref, "$ref") + if s.DraftVersion < 2019 { + return vd.uneval, err + } + if err != nil { + vd.addErr(err) + } + } + + // type specific validations -- + switch v := v.(type) { + case map[string]any: + vd.objValidate(v) + case []any: + vd.arrValidate(v) + case string: + vd.strValidate(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + vd.numValidate(v) + } + + if len(vd.errors) == 0 || !vd.boolResult { + if s.DraftVersion >= 2019 { + vd.validateRefs() + } + vd.condValidate() + + for _, ext := range s.Extensions { + ext.Validate(&ValidatorContext{vd}, v) + } + + if s.DraftVersion >= 2019 { + vd.unevalValidate() + } + } + + switch len(vd.errors) { + case 0: + return vd.uneval, nil + case 1: + return nil, vd.errors[0] + default: + verr := vd.error(&kind.Group{}) + verr.Causes = vd.errors + return nil, verr + } +} + +func (vd *validator) objValidate(obj map[string]any) { + s := vd.sch + + // minProperties -- + if s.MinProperties != nil { + if len(obj) < *s.MinProperties { + vd.addError(&kind.MinProperties{Got: len(obj), Want: *s.MinProperties}) + } + } + + // maxProperties -- + if s.MaxProperties != nil { + if len(obj) > *s.MaxProperties { + vd.addError(&kind.MaxProperties{Got: len(obj), Want: *s.MaxProperties}) + } + } + + // required -- + if len(s.Required) > 0 { + if missing := vd.findMissing(obj, s.Required); missing != nil { + vd.addError(&kind.Required{Missing: missing}) + } + } + + if vd.boolResult && len(vd.errors) > 0 { + return + } + + // dependencies -- + for pname, dep := range s.Dependencies { + if _, ok := obj[pname]; ok { + switch dep := dep.(type) { + case []string: + if missing := vd.findMissing(obj, dep); missing != nil { + vd.addError(&kind.Dependency{Prop: pname, Missing: missing}) + } + case *Schema: + vd.addErr(vd.validateSelf(dep, "", false)) + } + } + } + + var additionalPros []string + for pname, pvalue := range obj { + if vd.boolResult && len(vd.errors) > 0 { + return + } + evaluated := false + + // properties -- + if sch, ok := s.Properties[pname]; ok { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + + // patternProperties -- + for regex, sch := range s.PatternProperties { + if regex.MatchString(pname) { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + } + + if !evaluated && s.AdditionalProperties != nil { + evaluated = true + switch additional := s.AdditionalProperties.(type) { + case bool: + if !additional { + additionalPros = append(additionalPros, pname) + } + case *Schema: + vd.addErr(vd.validateVal(additional, pvalue, pname)) + } + } + + if evaluated { + delete(vd.uneval.props, pname) + } + } + if len(additionalPros) > 0 { + vd.addError(&kind.AdditionalProperties{Properties: additionalPros}) + } + + if s.DraftVersion == 4 { + return + } + + // propertyNames -- + if s.PropertyNames != nil { + for pname := range obj { + sch, meta, resources := 
s.PropertyNames, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err := sch.validate(pname, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.PropertyNames.Location + verr.ErrorKind = &kind.PropertyNames{Property: pname} + vd.addErr(verr) + } + } + } + + if s.DraftVersion == 6 { + return + } + + // dependentSchemas -- + for pname, sch := range s.DependentSchemas { + if _, ok := obj[pname]; ok { + vd.addErr(vd.validateSelf(sch, "", false)) + } + } + + // dependentRequired -- + for pname, reqd := range s.DependentRequired { + if _, ok := obj[pname]; ok { + if missing := vd.findMissing(obj, reqd); missing != nil { + vd.addError(&kind.DependentRequired{Prop: pname, Missing: missing}) + } + } + } +} + +func (vd *validator) arrValidate(arr []any) { + s := vd.sch + + // minItems -- + if s.MinItems != nil { + if len(arr) < *s.MinItems { + vd.addError(&kind.MinItems{Got: len(arr), Want: *s.MinItems}) + } + } + + // maxItems -- + if s.MaxItems != nil { + if len(arr) > *s.MaxItems { + vd.addError(&kind.MaxItems{Got: len(arr), Want: *s.MaxItems}) + } + } + + // uniqueItems -- + if s.UniqueItems && len(arr) > 1 { + i, j, k := duplicates(arr) + if k != nil { + vd.addError(k) + } else if i != -1 { + vd.addError(&kind.UniqueItems{Duplicates: [2]int{i, j}}) + } + } + + if s.DraftVersion < 2020 { + evaluated := 0 + + // items -- + switch items := s.Items.(type) { + case *Schema: + for i, item := range arr { + vd.addErr(vd.validateVal(items, item, strconv.Itoa(i))) + } + evaluated = len(arr) + case []*Schema: + min := minInt(len(arr), len(items)) + for i, item := range arr[:min] { + vd.addErr(vd.validateVal(items[i], item, strconv.Itoa(i))) + } + evaluated = min + } + + // additionalItems -- + if s.AdditionalItems != nil { + switch additional := s.AdditionalItems.(type) { + case bool: + if !additional && evaluated != len(arr) { + vd.addError(&kind.AdditionalItems{Count: len(arr) - evaluated}) + } + case *Schema: + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(additional, item, strconv.Itoa(i))) + } + } + } + } else { + evaluated := minInt(len(s.PrefixItems), len(arr)) + + // prefixItems -- + for i, item := range arr[:evaluated] { + vd.addErr(vd.validateVal(s.PrefixItems[i], item, strconv.Itoa(i))) + } + + // items2020 -- + if s.Items2020 != nil { + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(s.Items2020, item, strconv.Itoa(i))) + } + } + } + + // contains -- + if s.Contains != nil { + var errors []*ValidationError + var matched []int + + for i, item := range arr { + if err := vd.validateVal(s.Contains, item, strconv.Itoa(i)); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = append(matched, i) + if s.DraftVersion >= 2020 { + delete(vd.uneval.items, i) + } + } + } + + // minContains -- + if s.MinContains != nil { + if len(matched) < *s.MinContains { + vd.addErrors(errors, &kind.MinContains{Got: matched, Want: *s.MinContains}) + } + } else if len(matched) == 0 { + vd.addErrors(errors, &kind.Contains{}) + } + + // maxContains -- + if s.MaxContains != nil { + if len(matched) > *s.MaxContains { + vd.addError(&kind.MaxContains{Got: matched, Want: *s.MaxContains}) + } + } + } +} + +func (vd *validator) strValidate(str string) { + s := vd.sch + + strLen := -1 + if s.MinLength != nil || s.MaxLength != nil { + strLen = utf8.RuneCount([]byte(str)) + } 
+ + // minLength -- + if s.MinLength != nil { + if strLen < *s.MinLength { + vd.addError(&kind.MinLength{Got: strLen, Want: *s.MinLength}) + } + } + + // maxLength -- + if s.MaxLength != nil { + if strLen > *s.MaxLength { + vd.addError(&kind.MaxLength{Got: strLen, Want: *s.MaxLength}) + } + } + + // pattern -- + if s.Pattern != nil { + if !s.Pattern.MatchString(str) { + vd.addError(&kind.Pattern{Got: str, Want: s.Pattern.String()}) + } + } + + if s.DraftVersion == 6 { + return + } + + var err error + + // contentEncoding -- + decoded := []byte(str) + if s.ContentEncoding != nil { + decoded, err = s.ContentEncoding.Decode(str) + if err != nil { + decoded = nil + vd.addError(&kind.ContentEncoding{Want: s.ContentEncoding.Name, Err: err}) + } + } + + var deserialized *any + if decoded != nil && s.ContentMediaType != nil { + if s.ContentSchema == nil { + err = s.ContentMediaType.Validate(decoded) + } else { + var value any + value, err = s.ContentMediaType.UnmarshalJSON(decoded) + if err == nil { + deserialized = &value + } + } + if err != nil { + vd.addError(&kind.ContentMediaType{ + Got: decoded, + Want: s.ContentMediaType.Name, + Err: err, + }) + } + } + + if deserialized != nil && s.ContentSchema != nil { + sch, meta, resources := s.ContentSchema, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err = sch.validate(*deserialized, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.Location + verr.ErrorKind = &kind.ContentSchema{} + vd.addErr(verr) + } + } +} + +func (vd *validator) numValidate(v any) { + s := vd.sch + + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprintf("%v", v)) + } + return numVal + } + + // minimum -- + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + vd.addError(&kind.Minimum{Got: num(), Want: s.Minimum}) + } + + // maximum -- + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + vd.addError(&kind.Maximum{Got: num(), Want: s.Maximum}) + } + + // exclusiveMinimum + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + vd.addError(&kind.ExclusiveMinimum{Got: num(), Want: s.ExclusiveMinimum}) + } + + // exclusiveMaximum + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + vd.addError(&kind.ExclusiveMaximum{Got: num(), Want: s.ExclusiveMaximum}) + } + + // multipleOf + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + vd.addError(&kind.MultipleOf{Got: num(), Want: s.MultipleOf}) + } + } +} + +func (vd *validator) condValidate() { + s := vd.sch + + // not -- + if s.Not != nil { + if vd.validateSelf(s.Not, "", true) == nil { + vd.addError(&kind.Not{}) + } + } + + // allOf -- + if len(s.AllOf) > 0 { + var errors []*ValidationError + for _, sch := range s.AllOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + if vd.boolResult { + break + } + } + } + if len(errors) != 0 { + vd.addErrors(errors, &kind.AllOf{}) + } + } + + // anyOf + if len(s.AnyOf) > 0 { + var matched bool + var errors []*ValidationError + for _, sch := range s.AnyOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = true + // for uneval, all schemas must be evaluated + if vd.uneval.isEmpty() { + break + } + } + } + if !matched { + 
vd.addErrors(errors, &kind.AnyOf{}) + } + } + + // oneOf + if len(s.OneOf) > 0 { + var matched = -1 + var errors []*ValidationError + for i, sch := range s.OneOf { + if err := vd.validateSelf(sch, "", matched != -1); err != nil { + if matched == -1 { + errors = append(errors, err.(*ValidationError)) + } + } else { + if matched == -1 { + matched = i + } else { + vd.addError(&kind.OneOf{Subschemas: []int{matched, i}}) + break + } + } + } + if matched == -1 { + vd.addErrors(errors, &kind.OneOf{Subschemas: nil}) + } + } + + // if, then, else -- + if s.If != nil { + if vd.validateSelf(s.If, "", true) == nil { + if s.Then != nil { + vd.addErr(vd.validateSelf(s.Then, "", false)) + } + } else if s.Else != nil { + vd.addErr(vd.validateSelf(s.Else, "", false)) + } + } +} + +func (vd *validator) unevalValidate() { + s := vd.sch + + // unevaluatedProperties + if obj, ok := vd.v.(map[string]any); ok && s.UnevaluatedProperties != nil { + for pname := range vd.uneval.props { + if pvalue, ok := obj[pname]; ok { + vd.addErr(vd.validateVal(s.UnevaluatedProperties, pvalue, pname)) + } + } + vd.uneval.props = nil + } + + // unevaluatedItems + if arr, ok := vd.v.([]any); ok && s.UnevaluatedItems != nil { + for i := range vd.uneval.items { + vd.addErr(vd.validateVal(s.UnevaluatedItems, arr[i], strconv.Itoa(i))) + } + vd.uneval.items = nil + } +} + +// validation helpers -- + +func (vd *validator) validateSelf(sch *Schema, refKw string, boolResult bool) error { + scp := vd.scp.child(sch, refKw, vd.scp.vid) + uneval := unevalFrom(vd.v, sch, !vd.uneval.isEmpty()) + subvd := validator{ + v: vd.v, + vloc: vd.vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult || boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + uneval, err := subvd.validate() + if err == nil { + vd.uneval.merge(uneval) + } + return err +} + +func (vd *validator) validateVal(sch *Schema, v any, vtok string) error { + vloc := append(vd.vloc, vtok) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) validateValue(sch *Schema, v any, vpath []string) error { + vloc := append(vd.vloc, vpath...) 
+ scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) metaResource(sch *Schema) *resource { + if sch != vd.meta { + return nil + } + ptr := "" + for _, tok := range vd.instanceLocation() { + ptr += "/" + ptr += escape(tok) + } + return vd.resources[jsonPointer(ptr)] +} + +func (vd *validator) handleMeta() { + res := vd.metaResource(vd.sch) + if res == nil { + return + } + sch := res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + vd.meta = sch + vd.sch = sch +} + +// reference validation -- + +func (vd *validator) validateRef(sch *Schema, kw string) error { + err := vd.validateSelf(sch, kw, false) + if err != nil { + refErr := vd.error(&kind.Reference{Keyword: kw, URL: sch.Location}) + verr := err.(*ValidationError) + if _, ok := verr.ErrorKind.(*kind.Group); ok { + refErr.Causes = verr.Causes + } else { + refErr.Causes = append(refErr.Causes, verr) + } + return refErr + } + return nil +} + +func (vd *validator) resolveRecursiveAnchor(fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if scp.sch.resource.RecursiveAnchor { + sch = scp.sch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) resolveDynamicAnchor(name string, fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if dsch, ok := scp.sch.resource.dynamicAnchors[name]; ok { + sch = dsch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) validateRefs() { + // $recursiveRef -- + if sch := vd.sch.RecursiveRef; sch != nil { + if sch.RecursiveAnchor { + sch = vd.resolveRecursiveAnchor(sch) + } + vd.addErr(vd.validateRef(sch, "$recursiveRef")) + } + + // $dynamicRef -- + if dref := vd.sch.DynamicRef; dref != nil { + sch := dref.Ref // initial target + if dref.Anchor != "" { + // $dynamicRef includes anchor + if sch.DynamicAnchor == dref.Anchor { + // initial target has matching $dynamicAnchor + sch = vd.resolveDynamicAnchor(dref.Anchor, sch) + } + } + vd.addErr(vd.validateRef(sch, "$dynamicRef")) + } +} + +// error helpers -- + +func (vd *validator) instanceLocation() []string { + return slices.Clone(vd.vloc) +} + +func (vd *validator) error(kind ErrorKind) *ValidationError { + if vd.boolResult { + return &ValidationError{} + } + return &ValidationError{ + SchemaURL: vd.sch.Location, + InstanceLocation: vd.instanceLocation(), + ErrorKind: kind, + Causes: nil, + } +} + +func (vd *validator) addErr(err error) { + if err != nil { + vd.errors = append(vd.errors, err.(*ValidationError)) + } +} + +func (vd *validator) addError(kind ErrorKind) { + vd.errors = append(vd.errors, vd.error(kind)) +} + +func (vd *validator) addErrors(errors []*ValidationError, kind ErrorKind) { + err := vd.error(kind) + err.Causes = errors + vd.errors = append(vd.errors, err) +} + +func (vd *validator) findMissing(obj map[string]any, reqd []string) []string { + var missing []string + for _, pname := range reqd { + if _, ok := obj[pname]; !ok { + if vd.boolResult { + return []string{} // non-nil + } + missing = append(missing, pname) + } + } + return missing +} + +// -- + +type scope struct { + sch *Schema + + // if empty, compute from self.sch and self.parent.sch. 
+ // not empty, only when there is a jump i.e, $ref, $XXXRef + refKeyword string + + // unique id of value being validated + // if two scopes validate same value, they will have + // same vid + vid int + + parent *scope +} + +func (sc *scope) child(sch *Schema, refKeyword string, vid int) *scope { + return &scope{sch, refKeyword, vid, sc} +} + +func (sc *scope) checkCycle() *scope { + scp := sc.parent + for scp != nil { + if scp.vid != sc.vid { + break + } + if scp.sch == sc.sch { + return scp + } + scp = scp.parent + } + return nil +} + +func (sc *scope) kwLoc() string { + var loc string + for sc.parent != nil { + if sc.refKeyword != "" { + loc = fmt.Sprintf("/%s%s", escape(sc.refKeyword), loc) + } else { + cur := sc.sch.Location + parent := sc.parent.sch.Location + loc = fmt.Sprintf("%s%s", cur[len(parent):], loc) + } + sc = sc.parent + } + return loc +} + +// -- + +type uneval struct { + props map[string]struct{} + items map[int]struct{} +} + +func unevalFrom(v any, sch *Schema, callerNeeds bool) *uneval { + uneval := &uneval{} + switch v := v.(type) { + case map[string]any: + if !sch.allPropsEvaluated && (callerNeeds || sch.UnevaluatedProperties != nil) { + uneval.props = map[string]struct{}{} + for k := range v { + uneval.props[k] = struct{}{} + } + } + case []any: + if !sch.allItemsEvaluated && (callerNeeds || sch.UnevaluatedItems != nil) && sch.numItemsEvaluated < len(v) { + uneval.items = map[int]struct{}{} + for i := sch.numItemsEvaluated; i < len(v); i++ { + uneval.items[i] = struct{}{} + } + } + } + return uneval +} + +func (ue *uneval) merge(other *uneval) { + for k := range ue.props { + if _, ok := other.props[k]; !ok { + delete(ue.props, k) + } + } + for i := range ue.items { + if _, ok := other.items[i]; !ok { + delete(ue.items, i) + } + } +} + +func (ue *uneval) isEmpty() bool { + return len(ue.props) == 0 && len(ue.items) == 0 +} + +// -- + +type ValidationError struct { + // absolute, dereferenced schema location. + SchemaURL string + + // location of the JSON value within the instance being validated. + InstanceLocation []string + + // kind of error + ErrorKind ErrorKind + + // holds nested errors + Causes []*ValidationError +} + +type ErrorKind interface { + KeywordPath() []string + LocalizedString(*message.Printer) string +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go new file mode 100644 index 000000000..18ace91e8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go @@ -0,0 +1,106 @@ +package jsonschema + +// CompilerContext provides helpers for +// compiling a [Vocabulary]. +type CompilerContext struct { + c *objCompiler +} + +func (ctx *CompilerContext) Enqueue(schPath []string) *Schema { + ptr := ctx.c.up.ptr + for _, tok := range schPath { + ptr = ptr.append(tok) + } + return ctx.c.enqueuePtr(ptr) +} + +// Vocabulary defines a set of keywords, their syntax and +// their semantics. +type Vocabulary struct { + // URL identifier for this Vocabulary. + URL string + + // Schema that is used to validate the keywords that is introduced by this + // vocabulary. + Schema *Schema + + // Subschemas lists the possible locations of subschemas introduced by + // this vocabulary. + Subschemas []SchemaPath + + // Compile compiles the keywords(introduced by this vocabulary) in obj into [SchemaExt]. + // If obj does not contain any keywords introduced by this vocabulary, nil SchemaExt must + // be returned. 
+ Compile func(ctx *CompilerContext, obj map[string]any) (SchemaExt, error) +} + +// -- + +// SchemaExt is compled form of vocabulary. +type SchemaExt interface { + // Validate validates v against and errors if any are reported + // to ctx. + Validate(ctx *ValidatorContext, v any) +} + +// ValidatorContext provides helpers for +// validating with [SchemaExt]. +type ValidatorContext struct { + vd *validator +} + +// Validate validates v with sch. vpath gives path of v from current context value. +func (ctx *ValidatorContext) Validate(sch *Schema, v any, vpath []string) error { + switch len(vpath) { + case 0: + return ctx.vd.validateSelf(sch, "", false) + case 1: + return ctx.vd.validateVal(sch, v, vpath[0]) + default: + return ctx.vd.validateValue(sch, v, vpath) + } +} + +// EvaluatedProp marks given property of current object as evaluated. +func (ctx *ValidatorContext) EvaluatedProp(pname string) { + delete(ctx.vd.uneval.props, pname) +} + +// EvaluatedItem marks items at given index of current array as evaluated. +func (ctx *ValidatorContext) EvaluatedItem(index int) { + delete(ctx.vd.uneval.items, index) +} + +// AddError reports validation-error of given kind. +func (ctx *ValidatorContext) AddError(k ErrorKind) { + ctx.vd.addError(k) +} + +// AddErrors reports validation-errors of given kind. +func (ctx *ValidatorContext) AddErrors(errors []*ValidationError, k ErrorKind) { + ctx.vd.addErrors(errors, k) +} + +// AddErr reports the given err. This is typically used to report +// the error created by subschema validation. +// +// NOTE that err must be of type *ValidationError. +func (ctx *ValidatorContext) AddErr(err error) { + ctx.vd.addErr(err) +} + +func (ctx *ValidatorContext) Equals(v1, v2 any) (bool, error) { + b, k := equals(v1, v2) + if k != nil { + return false, ctx.vd.error(k) + } + return b, nil +} + +func (ctx *ValidatorContext) Duplicates(arr []any) (int, int, error) { + i, j, k := duplicates(arr) + if k != nil { + return -1, -1, ctx.vd.error(k) + } + return i, j, nil +} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go index 09f7c8240..c9e2dabd1 100644 --- a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go +++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go @@ -2,6 +2,7 @@ package analyzer import ( "flag" + "fmt" "go/ast" "go/token" "strings" @@ -364,7 +365,7 @@ func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) { key := strings.ToUpper(currentVal) if newVal, ok := mapping.HTTPMethod[key]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -372,7 +373,7 @@ func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -380,7 +381,7 @@ func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeWeekday[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -388,7 +389,7 @@ func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeMonth[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, 
basicLit, currentVal, newVal) } } @@ -396,7 +397,7 @@ func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeLayout[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -404,7 +405,7 @@ func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.CryptoHash[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -412,7 +413,7 @@ func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -422,7 +423,7 @@ func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -430,7 +431,7 @@ func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -438,7 +439,7 @@ func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.ConstantKind[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -514,6 +515,16 @@ func getBasicLitValue(basicLit *ast.BasicLit) string { return val.String() } -func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) { - pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal) +func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) { + pass.Report(analysis.Diagnostic{ + Pos: rg.Pos(), + Message: fmt.Sprintf("%q can be replaced by %s", currentVal, newVal), + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: rg.Pos(), + End: rg.End(), + NewText: []byte(newVal), + }}, + }}, + }) } diff --git a/vendor/github.com/securego/gosec/v2/.golangci.yml b/vendor/github.com/securego/gosec/v2/.golangci.yml index c63a7cc5a..c11694cc1 100644 --- a/vendor/github.com/securego/gosec/v2/.golangci.yml +++ b/vendor/github.com/securego/gosec/v2/.golangci.yml @@ -23,6 +23,7 @@ linters: - nolintlint - revive - staticcheck + - testifylint - typecheck - unconvert - unparam @@ -40,10 +41,14 @@ linters-settings: - all - '-SA1019' + testifylint: + enable-all: true + revive: rules: - name: dot-imports disabled: true + - name: redefines-builtin-id run: timeout: 5m diff --git a/vendor/github.com/securego/gosec/v2/.goreleaser.yml b/vendor/github.com/securego/gosec/v2/.goreleaser.yml index bd85bab3a..7ef0d7a3d 100644 --- a/vendor/github.com/securego/gosec/v2/.goreleaser.yml +++ b/vendor/github.com/securego/gosec/v2/.goreleaser.yml @@ -1,4 +1,5 @@ --- +version: 2 project_name: gosec release: diff --git a/vendor/github.com/securego/gosec/v2/CONTRIBUTING.md b/vendor/github.com/securego/gosec/v2/CONTRIBUTING.md new file mode 100644 index 000000000..32752ad59 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +## 
Adding a new rule + +New rules can be implemented in two ways: + +- as a `gosec.Rule` -- these define an arbitrary function which will be called on every AST node in the analyzed file, and are appropriate for rules that mostly need to reason about a single statement. +- as an Analyzer -- these can operate on the entire program, and receive an [SSA](https://pkg.go.dev/golang.org/x/tools/go/ssa) representation of the package. This type of rule is useful when you need to perform a more complex analysis that requires a great deal of context. + +### Adding a gosec.Rule + +1. Copy an existing rule file as a starting point-- `./rules/unsafe.go` is a good option, as it implements a very simple rule with no additional supporting logic. Put the copied file in the `./rules/` directory. +2. Change the name of the rule constructor function and of the types in the rule file you've copied so they will be unique. +3. Edit the `Generate` function in `./rules/rulelist.go` to include your rule. +4. Add a RuleID to CWE ID mapping for your rule to the `ruleToCWE` map in `./issue/issue.go`. If you need a CWE that isn't already defined in `./cwe/data.go`, add it to the `idWeaknessess` map in that file. +5. Use `make` to compile `gosec`. The binary will now contain your rule. + +To make your rule actually useful, you will likely want to use the support functions defined in `./resolve.go`, `./helpers.go` and `./call_list.go`. There are inline comments explaining the purpose of most of these functions, and you can find usage examples in the existing rule files. + +### Adding an Analyzer + +1. Create a new go file under `./analyzers/` with the following scaffolding in it: + +```go +package analyzers + +import ( + "fmt" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "github.com/securego/gosec/v2/issue" +) + +const defaultIssueDescriptionMyAnalyzer = "My new analyzer!" + +func newMyAnalyzer(id string, description string) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: id, + Doc: description, + Run: runMyAnalyzer, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } +} + +func runMyAnalyzer(pass *analysis.Pass) (interface{}, error) { + ssaResult, err := getSSAResult(pass) + if err != nil { + return nil, fmt.Errorf("building ssa representation: %w", err) + } + var issues []*issue.Issue + fmt.Printf("My Analyzer ran! %+v\n", ssaResult) + + return issues, nil +} +``` + +2. Add the analyzer to `./analyzers/analyzerslist.go` in the `defaultAnalyzers` variable under an entry like `{"G999", "My test analyzer", newMyAnalyzer}` +3. Add a RuleID to CWE ID mapping for your rule to the `ruleToCWE` map in `./issue/issue.go`. If you need a CWE that isn't already defined in `./cwe/data.go`, add it to the `idWeaknessess` map in that file. +4. `make`; then run the `gosec` binary produced. You should see the output from our print statement. +5. You now have a working example analyzer to play with-- look at the other implemented analyzers for ideas on how to make useful rules. + +## Developing your rule + +There are some utility tools which are useful for analyzing the SSA and AST representation `gosec` works with before writing rules or analyzers. + +For instance to dump the SSA, the [ssadump](https://pkg.go.dev/golang.org/x/tools/cmd/ssadump) tool can be used as following: + +```bash +ssadump -build F main.go +``` + +Consult the documentation for ssadump for an overview of available output flags and options. 
+ +For outputting the AST and supporting information, there is a utility tool in which can be compiled and used as standalone. + +```bash +gosecutil -tool ast main.go +``` + +Valid tool arguments for this command are `ast`, `callobj`, `uses`, `types`, `defs`, `comments`, and `imports`. diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md index 7b8b526a8..5e68182cb 100644 --- a/vendor/github.com/securego/gosec/v2/README.md +++ b/vendor/github.com/securego/gosec/v2/README.md @@ -22,6 +22,7 @@ You may obtain a copy of the License [here](http://www.apache.org/licenses/LICEN [![Downloads](https://img.shields.io/github/downloads/securego/gosec/total.svg)](https://github.com/securego/gosec/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/securego/gosec.svg)](https://hub.docker.com/r/securego/gosec/tags) [![Slack](https://img.shields.io/badge/Slack-4A154B?style=for-the-badge&logo=slack&logoColor=white)](http://securego.slack.com) +[![go-recipes](https://raw.githubusercontent.com/nikolaydubina/go-recipes/main/badge.svg?raw=true)](https://github.com/nikolaydubina/go-recipes) ## Install @@ -211,30 +212,9 @@ A number of global settings can be provided in a configuration file as follows: $ gosec -conf config.json . ``` -Also some rules accept configuration. For instance on rule `G104`, it is possible to define packages along with a list -of functions which will be skipped when auditing the not checked errors: +#### Rule Configuration -```JSON -{ - "G104": { - "ioutil": ["WriteFile"] - } -} -``` - -You can also configure the hard-coded credentials rule `G101` with additional patterns, or adjust the entropy threshold: - -```JSON -{ - "G101": { - "pattern": "(?i)passwd|pass|password|pwd|secret|private_key|token", - "ignore_entropy": false, - "entropy_threshold": "80.0", - "per_char_threshold": "3.0", - "truncate": "32" - } -} -``` +Some rules accept configuration flags as well; these flags are documented in [RULES.md](https://github.com/securego/gosec/blob/master/RULES.md). #### Go version @@ -308,7 +288,7 @@ func main() { } client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") + _, err := client.Get("https://go.dev/") if err != nil { fmt.Println(err) } @@ -355,7 +335,7 @@ comment. ### Build tags -gosec is able to pass your [Go build tags](https://golang.org/pkg/go/build/) to the analyzer. +gosec is able to pass your [Go build tags](https://pkg.go.dev/go/build/) to the analyzer. They can be provided as a comma separated list as follows: ```bash @@ -387,6 +367,8 @@ $ gosec -fmt=json -out=results.json -stdout -verbose=text *.go ## Development +[CONTRIBUTING.md](https://github.com/securego/gosec/blob/master/CONTRIBUTING.md) contains detailed information about adding new rules to gosec. + ### Build You can build the binary with: diff --git a/vendor/github.com/securego/gosec/v2/RULES.md b/vendor/github.com/securego/gosec/v2/RULES.md new file mode 100644 index 000000000..94cfd76a8 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/RULES.md @@ -0,0 +1,61 @@ +# Rule Documentation + +## Rules accepting parameters + +As [README.md](https://github.com/securego/gosec/blob/master/README.md) mentions, some rules can be configured by adding parameters to the gosec JSON config. Per rule configs are encoded as top level objects in the gosec config, with the rule ID (`Gxxx`) as the key. + +Currently, the following rules accept parameters. This list is manually maintained; if you notice an omission please add it! 
+ +### G101 + +The hard-coded credentials rule `G101` can be configured with additional patterns, and the entropy threshold can be adjusted: + +```JSON +{ + "G101": { + "pattern": "(?i)passwd|pass|password|pwd|secret|private_key|token", + "ignore_entropy": false, + "entropy_threshold": "80.0", + "per_char_threshold": "3.0", + "truncate": "32" + } +} +``` + +### G104 + +The unchecked error value rule `G104` can be configured with additional functions that should be permitted to be called without checking errors. + +```JSON +{ + "G104": { + "ioutil": ["WriteFile"] + } +} +``` + +### G111 + +The HTTP Directory serving rule `G111` can be configured with a different regex for detecting potentially overly permissive servers. Note that this *replaces* the default pattern of `http\.Dir\("\/"\)|http\.Dir\('\/'\)`. + +```JSON +{ + "G111": { + "pattern": "http\\.Dir\\(\"\\\/\"\\)|http\\.Dir\\('\\\/'\\)" + } +} + +``` + +### G301, G302, G306, G307 + +The various file and directory permission checking rules can be configured with a different maximum allowable file permission. + +```JSON +{ + "G301":"0o600", + "G302":"0o600", + "G306":"0o750", + "G307":"0o750" +} +``` diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml index 2b2deaab7..edd7e7083 100644 --- a/vendor/github.com/securego/gosec/v2/action.yml +++ b/vendor/github.com/securego/gosec/v2/action.yml @@ -10,7 +10,7 @@ inputs: runs: using: 'docker' - image: 'docker://securego/gosec:2.21.3' + image: 'docker://securego/gosec:2.22.1' args: - ${{ inputs.args }} diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go index bfa7e1940..186cc3c25 100644 --- a/vendor/github.com/securego/gosec/v2/analyzer.go +++ b/vendor/github.com/securego/gosec/v2/analyzer.go @@ -16,6 +16,7 @@ package gosec import ( + "errors" "fmt" "go/ast" "go/build" @@ -543,8 +544,8 @@ func (gosec *Analyzer) ParseErrors(pkg *packages.Package) error { // AppendError appends an error to the file errors func (gosec *Analyzer) AppendError(file string, err error) { // Do not report the error for empty packages (e.g. files excluded from build with a tag) - r := regexp.MustCompile(`no buildable Go source files in`) - if r.MatchString(err.Error()) { + var noGoErr *build.NoGoError + if errors.As(err, &noGoErr) { return } errors := make([]Error, 0) @@ -558,66 +559,71 @@ func (gosec *Analyzer) AppendError(file string, err error) { // ignore a node (and sub-tree) if it is tagged with a nosec tag comment func (gosec *Analyzer) ignore(n ast.Node) map[string]issue.SuppressionInfo { - if groups, ok := gosec.context.Comments[n]; ok && !gosec.ignoreNosec { + if gosec.ignoreNosec { + return nil + } + groups, ok := gosec.context.Comments[n] + if !ok { + return nil + } - // Checks if an alternative for #nosec is set and, if not, uses the default. - noSecDefaultTag, err := gosec.config.GetGlobal(Nosec) - if err != nil { - noSecDefaultTag = NoSecTag(string(Nosec)) - } else { - noSecDefaultTag = NoSecTag(noSecDefaultTag) - } - noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative) - if err != nil { - noSecAlternativeTag = noSecDefaultTag - } else { - noSecAlternativeTag = NoSecTag(noSecAlternativeTag) - } + // Checks if an alternative for #nosec is set and, if not, uses the default. 
+ noSecDefaultTag, err := gosec.config.GetGlobal(Nosec) + if err != nil { + noSecDefaultTag = NoSecTag(string(Nosec)) + } else { + noSecDefaultTag = NoSecTag(noSecDefaultTag) + } + noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative) + if err != nil { + noSecAlternativeTag = noSecDefaultTag + } else { + noSecAlternativeTag = NoSecTag(noSecAlternativeTag) + } - for _, group := range groups { - comment := strings.TrimSpace(group.Text()) - foundDefaultTag := strings.HasPrefix(comment, noSecDefaultTag) || regexp.MustCompile("\n *"+noSecDefaultTag).MatchString(comment) - foundAlternativeTag := strings.HasPrefix(comment, noSecAlternativeTag) || regexp.MustCompile("\n *"+noSecAlternativeTag).MatchString(comment) + for _, group := range groups { + comment := strings.TrimSpace(group.Text()) + foundDefaultTag := strings.HasPrefix(comment, noSecDefaultTag) || regexp.MustCompile("\n *"+noSecDefaultTag).MatchString(comment) + foundAlternativeTag := strings.HasPrefix(comment, noSecAlternativeTag) || regexp.MustCompile("\n *"+noSecAlternativeTag).MatchString(comment) - if foundDefaultTag || foundAlternativeTag { - gosec.stats.NumNosec++ + if foundDefaultTag || foundAlternativeTag { + gosec.stats.NumNosec++ - // Discard what's in front of the nosec tag. - if foundDefaultTag { - comment = strings.SplitN(comment, noSecDefaultTag, 2)[1] - } else { - comment = strings.SplitN(comment, noSecAlternativeTag, 2)[1] - } + // Discard what's in front of the nosec tag. + if foundDefaultTag { + comment = strings.SplitN(comment, noSecDefaultTag, 2)[1] + } else { + comment = strings.SplitN(comment, noSecAlternativeTag, 2)[1] + } - // Extract the directive and the justification. - justification := "" - commentParts := regexp.MustCompile(`-{2,}`).Split(comment, 2) - directive := commentParts[0] - if len(commentParts) > 1 { - justification = strings.TrimSpace(strings.TrimRight(commentParts[1], "\n")) - } + // Extract the directive and the justification. + justification := "" + commentParts := regexp.MustCompile(`-{2,}`).Split(comment, 2) + directive := commentParts[0] + if len(commentParts) > 1 { + justification = strings.TrimSpace(strings.TrimRight(commentParts[1], "\n")) + } - // Pull out the specific rules that are listed to be ignored. - re := regexp.MustCompile(`(G\d{3})`) - matches := re.FindAllStringSubmatch(directive, -1) + // Pull out the specific rules that are listed to be ignored. + re := regexp.MustCompile(`(G\d{3})`) + matches := re.FindAllStringSubmatch(directive, -1) - suppression := issue.SuppressionInfo{ - Kind: "inSource", - Justification: justification, - } + suppression := issue.SuppressionInfo{ + Kind: "inSource", + Justification: justification, + } - // Find the rule IDs to ignore. - ignores := make(map[string]issue.SuppressionInfo) - for _, v := range matches { - ignores[v[1]] = suppression - } + // Find the rule IDs to ignore. + ignores := make(map[string]issue.SuppressionInfo) + for _, v := range matches { + ignores[v[1]] = suppression + } - // If no specific rules were given, ignore everything. - if len(matches) == 0 { - ignores[aliasOfAllRules] = suppression - } - return ignores + // If no specific rules were given, ignore everything. 
+ if len(matches) == 0 { + ignores[aliasOfAllRules] = suppression } + return ignores } } return nil diff --git a/vendor/github.com/securego/gosec/v2/analyzers/conversion_overflow.go b/vendor/github.com/securego/gosec/v2/analyzers/conversion_overflow.go index bebe9b834..42e186710 100644 --- a/vendor/github.com/securego/gosec/v2/analyzers/conversion_overflow.go +++ b/vendor/github.com/securego/gosec/v2/analyzers/conversion_overflow.go @@ -15,6 +15,7 @@ package analyzers import ( + "cmp" "fmt" "go/token" "math" @@ -22,7 +23,6 @@ import ( "strconv" "strings" - "golang.org/x/exp/constraints" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/ssa" @@ -49,7 +49,7 @@ type rangeResult struct { type branchResults struct { minValue *int maxValue *uint - explixitPositiveVals []uint + explicitPositiveVals []uint explicitNegativeVals []int convertFound bool } @@ -141,8 +141,8 @@ func parseIntType(intType string) (integer, error) { return integer{}, fmt.Errorf("invalid bit size: %d", intSize) } - var min int - var max uint + var minVal int + var maxVal uint if signed { shiftAmount := intSize - 1 @@ -152,19 +152,19 @@ func parseIntType(intType string) (integer, error) { return integer{}, fmt.Errorf("invalid shift amount: %d", shiftAmount) } - max = (1 << uint(shiftAmount)) - 1 - min = -1 << (intSize - 1) + maxVal = (1 << uint(shiftAmount)) - 1 + minVal = -1 << (intSize - 1) } else { - max = (1 << uint(intSize)) - 1 - min = 0 + maxVal = (1 << uint(intSize)) - 1 + minVal = 0 } return integer{ signed: signed, size: intSize, - min: min, - max: max, + min: minVal, + max: maxVal, }, nil } @@ -226,7 +226,12 @@ func isStringToIntConversion(instr *ssa.Convert, dstType string) bool { if err != nil { return false } - isSafe := bitSizeValue <= dstInt.size && signed == dstInt.signed + + // we're good if: + // - signs match and bit size is <= than destination + // - parsing unsigned and bit size is < than destination + isSafe := (bitSizeValue <= dstInt.size && signed == dstInt.signed) || + (bitSizeValue < dstInt.size && !signed) return isSafe } } @@ -269,8 +274,8 @@ func hasExplicitRangeCheck(instr *ssa.Convert, dstType string) bool { case *ssa.If: result := getResultRange(v, instr, visitedIfs) if result.isRangeCheck { - minValue = max(minValue, &result.minValue) - maxValue = min(maxValue, &result.maxValue) + minValue = max(minValue, result.minValue) + maxValue = min(maxValue, result.maxValue) explicitPositiveVals = append(explicitPositiveVals, result.explicitPositiveVals...) explicitNegativeVals = append(explicitNegativeVals, result.explicitNegativeVals...) } @@ -323,17 +328,17 @@ func getResultRange(ifInstr *ssa.If, instr *ssa.Convert, visitedIfs map[*ssa.If] if thenBounds.convertFound { result.convertFound = true - result.minValue = max(result.minValue, thenBounds.minValue) - result.maxValue = min(result.maxValue, thenBounds.maxValue) + result.minValue = maxWithPtr(result.minValue, thenBounds.minValue) + result.maxValue = minWithPtr(result.maxValue, thenBounds.maxValue) } else if elseBounds.convertFound { result.convertFound = true - result.minValue = max(result.minValue, elseBounds.minValue) - result.maxValue = min(result.maxValue, elseBounds.maxValue) + result.minValue = maxWithPtr(result.minValue, elseBounds.minValue) + result.maxValue = minWithPtr(result.maxValue, elseBounds.maxValue) } - result.explicitPositiveVals = append(result.explicitPositiveVals, thenBounds.explixitPositiveVals...) 
+ result.explicitPositiveVals = append(result.explicitPositiveVals, thenBounds.explicitPositiveVals...) result.explicitNegativeVals = append(result.explicitNegativeVals, thenBounds.explicitNegativeVals...) - result.explicitPositiveVals = append(result.explicitPositiveVals, elseBounds.explixitPositiveVals...) + result.explicitPositiveVals = append(result.explicitPositiveVals, elseBounds.explicitPositiveVals...) result.explicitNegativeVals = append(result.explicitNegativeVals, elseBounds.explicitNegativeVals...) return result @@ -383,14 +388,14 @@ func updateResultFromBinOp(result *rangeResult, binOp *ssa.BinOp, instr *ssa.Con } if op == "neg" { - min := result.minValue - max := result.maxValue + minVal := result.minValue + maxVal := result.maxValue - if min >= 0 { - result.maxValue = uint(min) + if minVal >= 0 { + result.maxValue = uint(minVal) } - if max <= math.MaxInt { - result.minValue = int(max) + if maxVal <= math.MaxInt { + result.minValue = int(maxVal) } } } @@ -444,9 +449,9 @@ func walkBranchForConvert(block *ssa.BasicBlock, instr *ssa.Convert, visitedIfs bounds.convertFound = bounds.convertFound || result.convertFound if result.isRangeCheck { - bounds.minValue = toPtr(max(result.minValue, bounds.minValue)) - bounds.maxValue = toPtr(min(result.maxValue, bounds.maxValue)) - bounds.explixitPositiveVals = append(bounds.explixitPositiveVals, result.explicitPositiveVals...) + bounds.minValue = toPtr(maxWithPtr(result.minValue, bounds.minValue)) + bounds.maxValue = toPtr(minWithPtr(result.maxValue, bounds.maxValue)) + bounds.explicitPositiveVals = append(bounds.explicitPositiveVals, result.explicitPositiveVals...) bounds.explicitNegativeVals = append(bounds.explicitNegativeVals, result.explicitNegativeVals...) } case *ssa.Call: @@ -535,24 +540,18 @@ func explicitValsInRange(explicitPosVals []uint, explicitNegVals []int, dstInt i return true } -func min[T constraints.Integer](a T, b *T) T { +func minWithPtr[T cmp.Ordered](a T, b *T) T { if b == nil { return a } - if a < *b { - return a - } - return *b + return min(a, *b) } -func max[T constraints.Integer](a T, b *T) T { +func maxWithPtr[T cmp.Ordered](a T, b *T) T { if b == nil { return a } - if a > *b { - return a - } - return *b + return max(a, *b) } func toPtr[T any](a T) *T { diff --git a/vendor/github.com/securego/gosec/v2/analyzers/hardcodedNonce.go b/vendor/github.com/securego/gosec/v2/analyzers/hardcoded_nonce.go similarity index 97% rename from vendor/github.com/securego/gosec/v2/analyzers/hardcodedNonce.go rename to vendor/github.com/securego/gosec/v2/analyzers/hardcoded_nonce.go index b07363388..c9c5395fb 100644 --- a/vendor/github.com/securego/gosec/v2/analyzers/hardcodedNonce.go +++ b/vendor/github.com/securego/gosec/v2/analyzers/hardcoded_nonce.go @@ -48,10 +48,7 @@ func runHardCodedNonce(pass *analysis.Pass) (interface{}, error) { // Example "Test" 3, 1 -- means the function "Test" which accepts 3 arguments, and has the nonce arg as second argument calls := map[string][]int{ "(crypto/cipher.AEAD).Seal": {4, 1}, - "(crypto/cipher.AEAD).Open": {4, 1}, - "crypto/cipher.NewCBCDecrypter": {2, 1}, "crypto/cipher.NewCBCEncrypter": {2, 1}, - "crypto/cipher.NewCFBDecrypter": {2, 1}, "crypto/cipher.NewCFBEncrypter": {2, 1}, "crypto/cipher.NewCTR": {2, 1}, "crypto/cipher.NewOFB": {2, 1}, @@ -163,9 +160,9 @@ func iterateThroughReferrers(variable ssa.Value, funcsToTrack map[string][]int, if refs == nil { return gosecIssues, nil } - // Go trough all functions that use the given arg variable + // Go through all functions that use the 
given arg variable for _, ref := range *refs { - // Iterate trough the functions we are interested + // Iterate through the functions we are interested for trackedFunc := range funcsToTrack { // Split the functions we are interested in, by the '.' because we will use the function name to do the comparison diff --git a/vendor/github.com/securego/gosec/v2/rules/errors.go b/vendor/github.com/securego/gosec/v2/rules/errors.go index d31248ccb..278642655 100644 --- a/vendor/github.com/securego/gosec/v2/rules/errors.go +++ b/vendor/github.com/securego/gosec/v2/rules/errors.go @@ -105,7 +105,7 @@ func NewNoErrorCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { ID: id, Severity: issue.Low, Confidence: issue.High, - What: "Errors unhandled.", + What: "Errors unhandled", }, whitelist: whitelist, }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/fileperms.go b/vendor/github.com/securego/gosec/v2/rules/fileperms.go index eb1fa2eee..bf2a95953 100644 --- a/vendor/github.com/securego/gosec/v2/rules/fileperms.go +++ b/vendor/github.com/securego/gosec/v2/rules/fileperms.go @@ -157,7 +157,7 @@ func (r *osCreatePermissions) Match(n ast.Node, c *gosec.Context) (*issue.Issue, return nil, nil } -// NewOsCreatePerms reates a rule to detect file creation with a more permissive than configured +// NewOsCreatePerms creates a rule to detect file creation with a more permissive than configured // permission mask. func NewOsCreatePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { mode := getConfiguredMode(conf, id, 0o666) diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go index 75de4ed8c..ee2358c76 100644 --- a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go +++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go @@ -47,7 +47,7 @@ func doGetIdentExpr(expr ast.Expr, hasSelector bool) (*ast.Ident, bool) { } func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { - // This rule does not apply for Go 1.22, see https://tip.golang.org/doc/go1.22#language. + // This rule does not apply for Go 1.22, see https://go.dev/doc/go1.22#language. major, minor, _ := gosec.GoVersion() if major >= 1 && minor >= 22 { return nil, nil diff --git a/vendor/github.com/shazow/go-diff/difflib/differ.go b/vendor/github.com/shazow/go-diff/difflib/differ.go deleted file mode 100644 index 43dc84d9a..000000000 --- a/vendor/github.com/shazow/go-diff/difflib/differ.go +++ /dev/null @@ -1,39 +0,0 @@ -// This package implements the diff.Differ interface using github.com/mb0/diff as a backend. -package difflib - -import ( - "io" - "io/ioutil" - - "github.com/pmezard/go-difflib/difflib" -) - -type differ struct{} - -// New returns an implementation of diff.Differ using mb0diff as the backend. -func New() *differ { - return &differ{} -} - -// Diff consumes the entire reader streams into memory before generating a diff -// which then gets filled into the buffer. This implementation stores and -// manipulates all three values in memory. 
-func (diff *differ) Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error { - var src, dst []byte - var err error - - if src, err = ioutil.ReadAll(a); err != nil { - return err - } - if dst, err = ioutil.ReadAll(b); err != nil { - return err - } - - d := difflib.UnifiedDiff{ - A: difflib.SplitLines(string(src)), - B: difflib.SplitLines(string(dst)), - Context: 3, - } - - return difflib.WriteUnifiedDiff(out, d) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig similarity index 68% rename from vendor/github.com/sagikazarmark/slog-shim/.editorconfig rename to vendor/github.com/spf13/afero/.editorconfig index 1fb0e1bec..4492e9f9f 100644 --- a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig +++ b/vendor/github.com/spf13/afero/.editorconfig @@ -8,11 +8,5 @@ indent_style = space insert_final_newline = true trim_trailing_whitespace = true -[*.nix] -indent_size = 2 - -[{Makefile,*.mk}] +[*.go] indent_style = tab - -[Taskfile.yaml] -indent_size = 2 diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml new file mode 100644 index 000000000..806289a25 --- /dev/null +++ b/vendor/github.com/spf13/afero/.golangci.yaml @@ -0,0 +1,18 @@ +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/afero) + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - staticcheck + +issues: + exclude-dirs: + - gcsfs/internal/stiface diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 3bafbfdfc..619af574f 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -12,7 +12,7 @@ types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power +filesystems that make it easy to work with, while retaining all the power and benefit of the os and ioutil packages. 
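For illustration only (this block is an editorial sketch, not part of the vendored afero README or the upstream patch), the interchangeable-backend idea described above can be seen with the exported afero helpers `NewMemMapFs`, `WriteFile`, and `ReadFile`; swapping in `afero.NewOsFs()` would exercise the real filesystem through the same interface:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Any afero.Fs backend satisfies the same interface; an in-memory
	// filesystem is used here, but afero.NewOsFs() would work identically.
	var fs afero.Fs = afero.NewMemMapFs()

	if err := afero.WriteFile(fs, "/tmp/hello.txt", []byte("hello"), 0o644); err != nil {
		panic(err)
	}

	data, err := afero.ReadFile(fs, "/tmp/hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // prints "hello"
}
```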
Afero provides significant improvements over using the os package alone, most diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index 938b9316e..b13155ca4 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -255,7 +255,6 @@ func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { ret := make([]os.FileInfo, len(entries)) for i := range entries { ret[i], err = entries[i].Info() - if err != nil { return nil, err } diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index d6c744e8d..ed92f5649 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -16,11 +16,9 @@ package afero import ( "fmt" "io" - "log" "os" "path/filepath" - "sort" "strings" "sync" diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 0e9e14593..1be666a45 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,6 +1,6 @@ # cast -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index d49bbf83e..4181a2e75 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -18,6 +18,14 @@ import ( var errNegativeNotAllowed = errors.New("unable to cast negative value") +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + // ToTimeE casts an interface to a time.Time type. 
func ToTimeE(i interface{}) (tim time.Time, err error) { return ToTimeInDefaultLocationE(i, time.UTC) @@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return - case json.Number: + case float64EProvider: var v float64 v, err = s.Float64() d = time.Duration(v) return + case float64Provider: + d = time.Duration(s.Float64()) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -174,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case float64Provider: + return s.Float64(), nil case bool: if s { return 1, nil @@ -230,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case float64Provider: + return float32(s.Float64()), nil case bool: if s { return 1, nil @@ -598,12 +613,12 @@ func ToUint64E(i interface{}) (uint64, error) { switch s := i.(type) { case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) if err == nil { if v < 0 { return 0, errNegativeNotAllowed } - return uint64(v), nil + return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) case json.Number: @@ -917,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} { return nil } - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + errorType := reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() v := reflect.ValueOf(a) for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { @@ -987,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an interface to a map[string]string type. func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} + m := map[string]string{} switch v := i.(type) { case map[string]string: @@ -1017,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an interface to a map[string][]string type. func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} + m := map[string][]string{} switch v := i.(type) { case map[string][]string: @@ -1081,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an interface to a map[string]bool type. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} + m := map[string]bool{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1106,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an interface to a map[string]interface{} type. 
func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} + m := map[string]interface{}{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1126,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) { // ToStringMapIntE casts an interface to a map[string]int{} type. func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} + m := map[string]int{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) } @@ -1167,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) { // ToStringMapInt64E casts an interface to a map[string]int64{} type. func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} + m := map[string]int64{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) } @@ -1404,38 +1419,35 @@ func (f timeFormat) hasTimezone() bool { return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone } -var ( - timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) +var timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, +} func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { if d, e = time.Parse(format.format, s); e == nil { diff --git a/vendor/github.com/spf13/viper/.envrc b/vendor/github.com/spf13/viper/.envrc index 3ce7171a3..2e0f9f5f7 100644 --- a/vendor/github.com/spf13/viper/.envrc +++ b/vendor/github.com/spf13/viper/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" fi use flake . 
--impure diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml index 1faeae42c..474f41633 100644 --- a/vendor/github.com/spf13/viper/.golangci.yaml +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -17,8 +17,6 @@ linters-settings: disabled-checks: - importShadow - unnamedResult - golint: - min-confidence: 0 goimports: local-prefixes: github.com/spf13/viper @@ -30,7 +28,6 @@ linters: - dupl - durationcheck - exhaustive - - exportloopref - gci - gocritic - godot diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 3fc7d84f1..769a5d900 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -3,7 +3,8 @@ > > **Thank you!** -![Viper](.github/logo.png?raw=true) +![viper logo](https://github.com/user-attachments/assets/acae9193-2974-41f3-808d-2d433f5ada5e) + [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) [![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) @@ -11,7 +12,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -802,7 +803,7 @@ if err != nil { } ``` -Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +Viper uses [github.com/go-viper/mapstructure](https://github.com/go-viper/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. ### Decoding custom formats @@ -836,13 +837,15 @@ func yamlStringSettings() string { ## Viper or Vipers? -Viper comes ready to use out of the box. There is no configuration or -initialization needed to begin using Viper. Since most applications will want -to use a single central repository for their configuration, the viper package -provides this. It is similar to a singleton. +Viper comes with a global instance (singleton) out of the box. + +Although it makes setting up configuration easy, +using it is generally discouraged as it makes testing harder and can lead to unexpected behavior. + +The best practice is to initialize a Viper instance and pass that around when necessary. -In all of the examples above, they demonstrate using viper in its singleton -style approach. +The global instance _MAY_ be deprecated in the future. +See [#1855](https://github.com/spf13/viper/issues/1855) for more details. 
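As a minimal sketch of the instance-per-caller approach recommended above (editorial illustration, not text from the upstream README), the example below assumes a hypothetical `config.yaml` containing an `app.name` key and uses only the standard `viper.New`, `SetConfigName`, `AddConfigPath`, `ReadInConfig`, and `GetString` calls:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/viper"
)

// loadConfig builds and returns its own Viper instance instead of
// relying on the package-level singleton.
func loadConfig(path string) (*viper.Viper, error) {
	v := viper.New()
	v.SetConfigName("config") // looks for config.yaml, config.json, etc.
	v.AddConfigPath(path)
	if err := v.ReadInConfig(); err != nil {
		return nil, err
	}
	return v, nil
}

func main() {
	v, err := loadConfig(".")
	if err != nil {
		log.Fatal(err)
	}
	// The instance is passed around explicitly; no global state is mutated,
	// which keeps tests isolated from each other.
	fmt.Println(v.GetString("app.name"))
}
```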
### Working with multiple vipers diff --git a/vendor/github.com/spf13/viper/UPDATES.md b/vendor/github.com/spf13/viper/UPDATES.md new file mode 100644 index 000000000..ccf413ed7 --- /dev/null +++ b/vendor/github.com/spf13/viper/UPDATES.md @@ -0,0 +1,126 @@ +# Update Log + +**This document details any major updates required to use new features or improvements in Viper.** + +## v1.20.x + +### New file searching API + +Viper now includes a new file searching API that allows users to customize how Viper looks for config files. + +Viper accepts a custom [`Finder`](https://pkg.go.dev/github.com/spf13/viper#Finder) interface implementation: + +```go +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} +``` + +It is supposed to return a list of paths to config files. + +The default implementation uses [github.com/sagikazarmark/locafero](https://github.com/sagikazarmark/locafero) under the hood. + +You can supply your own implementation using `WithFinder`: + +```go +v := viper.NewWithOptions( + viper.WithFinder(&MyFinder{}), +) +``` + +For more information, check out the [Finder examples](https://pkg.go.dev/github.com/spf13/viper#Finder) +and the [documentation](https://pkg.go.dev/github.com/sagikazarmark/locafero) for the locafero package. + +### New encoding API + +Viper now allows customizing the encoding layer by providing an API for encoding and decoding configuration data: + +```go +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. 
+type Codec interface { + Encoder + Decoder +} +``` + +By default, Viper includes the following codecs: + +- JSON +- TOML +- YAML +- Dotenv + +The rest of the codecs are moved to [github.com/go-viper/encoding](https://github.com/go-viper/encoding) + +Customizing the encoding layer is possible by providing a custom registry of codecs: + +- [Encoder](https://pkg.go.dev/github.com/spf13/viper#Encoder) -> [EncoderRegistry](https://pkg.go.dev/github.com/spf13/viper#EncoderRegistry) +- [Decoder](https://pkg.go.dev/github.com/spf13/viper#Decoder) -> [DecoderRegistry](https://pkg.go.dev/github.com/spf13/viper#DecoderRegistry) +- [Codec](https://pkg.go.dev/github.com/spf13/viper#Codec) -> [CodecRegistry](https://pkg.go.dev/github.com/spf13/viper#CodecRegistry) + +You can supply the registry of codecs to Viper using the appropriate `With*Registry` function: + +```go +codecRegistry := viper.NewCodecRegistry() + +codecRegistry.RegisterCodec("myformat", &MyCodec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` + +### BREAKING: HCL, Java properties, INI removed from core + +In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: + +- HCL +- Java properties +- INI + +You can still use these formats though by importing them from [github.com/go-viper/encoding](https://github.com/go-viper/encoding): + +```go +import ( + "github.com/go-viper/encoding/hcl" + "github.com/go-viper/encoding/javaproperties" + "github.com/go-viper/encoding/ini" +) + +codecRegistry := viper.NewCodecRegistry() + +{ + codec := hcl.Codec{} + + codecRegistry.RegisterCodec("hcl", codec) + codecRegistry.RegisterCodec("tfvars", codec) + +} + +{ + codec := &javaproperties.Codec{} + + codecRegistry.RegisterCodec("properties", codec) + codecRegistry.RegisterCodec("props", codec) + codecRegistry.RegisterCodec("prop", codec) +} + +codecRegistry.RegisterCodec("ini", ini.Codec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` diff --git a/vendor/github.com/spf13/viper/encoding.go b/vendor/github.com/spf13/viper/encoding.go new file mode 100644 index 000000000..a7da55860 --- /dev/null +++ b/vendor/github.com/spf13/viper/encoding.go @@ -0,0 +1,181 @@ +package viper + +import ( + "errors" + "strings" + "sync" + + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" +) + +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. +type Codec interface { + Encoder + Decoder +} + +// TODO: consider adding specific errors for not found scenarios + +// EncoderRegistry returns an [Encoder] for a given format. +// +// Format is case-insensitive. +// +// [EncoderRegistry] returns an error if no [Encoder] is registered for the format. +type EncoderRegistry interface { + Encoder(format string) (Encoder, error) +} + +// DecoderRegistry returns an [Decoder] for a given format. +// +// Format is case-insensitive. 
+// +// [DecoderRegistry] returns an error if no [Decoder] is registered for the format. +type DecoderRegistry interface { + Decoder(format string) (Decoder, error) +} + +// [CodecRegistry] combines [EncoderRegistry] and [DecoderRegistry] interfaces. +type CodecRegistry interface { + EncoderRegistry + DecoderRegistry +} + +// WithEncoderRegistry sets a custom [EncoderRegistry]. +func WithEncoderRegistry(r EncoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + }) +} + +// WithDecoderRegistry sets a custom [DecoderRegistry]. +func WithDecoderRegistry(r DecoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.decoderRegistry = r + }) +} + +// WithCodecRegistry sets a custom [EncoderRegistry] and [DecoderRegistry]. +func WithCodecRegistry(r CodecRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + v.decoderRegistry = r + }) +} + +// DefaultCodecRegistry is a simple implementation of [CodecRegistry] that allows registering custom [Codec]s. +type DefaultCodecRegistry struct { + codecs map[string]Codec + + mu sync.RWMutex + once sync.Once +} + +// NewCodecRegistry returns a new [CodecRegistry], ready to accept custom [Codec]s. +func NewCodecRegistry() *DefaultCodecRegistry { + r := &DefaultCodecRegistry{} + + r.init() + + return r +} + +func (r *DefaultCodecRegistry) init() { + r.once.Do(func() { + r.codecs = map[string]Codec{} + }) +} + +// RegisterCodec registers a custom [Codec]. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) RegisterCodec(format string, codec Codec) error { + r.init() + + r.mu.Lock() + defer r.mu.Unlock() + + r.codecs[strings.ToLower(format)] = codec + + return nil +} + +// Encoder implements the [EncoderRegistry] interface. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) Encoder(format string) (Encoder, error) { + encoder, ok := r.codec(format) + if !ok { + return nil, errors.New("encoder not found for this format") + } + + return encoder, nil +} + +// Decoder implements the [DecoderRegistry] interface. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) Decoder(format string) (Decoder, error) { + decoder, ok := r.codec(format) + if !ok { + return nil, errors.New("decoder not found for this format") + } + + return decoder, nil +} + +func (r *DefaultCodecRegistry) codec(format string) (Codec, bool) { + r.mu.Lock() + defer r.mu.Unlock() + + format = strings.ToLower(format) + + if r.codecs != nil { + codec, ok := r.codecs[format] + if ok { + return codec, true + } + } + + switch format { + case "yaml", "yml": + return yaml.Codec{}, true + + case "json": + return json.Codec{}, true + + case "toml": + return toml.Codec{}, true + + case "dotenv", "env": + return &dotenv.Codec{}, true + } + + return nil, false +} diff --git a/vendor/github.com/spf13/viper/experimental.go b/vendor/github.com/spf13/viper/experimental.go new file mode 100644 index 000000000..6e19e8a10 --- /dev/null +++ b/vendor/github.com/spf13/viper/experimental.go @@ -0,0 +1,8 @@ +package viper + +// ExperimentalBindStruct tells Viper to use the new bind struct feature. 
+func ExperimentalBindStruct() Option { + return optionFunc(func(v *Viper) { + v.experimentalBindStruct = true + }) +} diff --git a/vendor/github.com/spf13/viper/file.go b/vendor/github.com/spf13/viper/file.go index a54fe5a7a..50a40581d 100644 --- a/vendor/github.com/spf13/viper/file.go +++ b/vendor/github.com/spf13/viper/file.go @@ -1,5 +1,3 @@ -//go:build !finder - package viper import ( @@ -7,12 +5,62 @@ import ( "os" "path/filepath" + "github.com/sagikazarmark/locafero" "github.com/spf13/afero" ) +// ExperimentalFinder tells Viper to use the new Finder interface for finding configuration files. +func ExperimentalFinder() Option { + return optionFunc(func(v *Viper) { + v.experimentalFinder = true + }) +} + +// Search for a config file. +func (v *Viper) findConfigFile() (string, error) { + finder := v.finder + + if finder == nil && v.experimentalFinder { + var names []string + + if v.configType != "" { + names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) + } else { + names = locafero.NameWithExtensions(v.configName, SupportedExts...) + } + + finder = locafero.Finder{ + Paths: v.configPaths, + Names: names, + Type: locafero.FileTypeFile, + } + } + + if finder != nil { + return v.findConfigFileWithFinder(finder) + } + + return v.findConfigFileOld() +} + +func (v *Viper) findConfigFileWithFinder(finder Finder) (string, error) { + results, err := finder.Find(v.fs) + if err != nil { + return "", err + } + + if len(results) == 0 { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + // We call clean on the final result to ensure that the path is in its canonical form. + // This is mostly for consistent path handling and to make sure tests pass. + return results[0], nil +} + // Search all configPaths for any config file. // Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { +func (v *Viper) findConfigFileOld() (string, error) { v.logger.Info("searching for config in paths", "paths", v.configPaths) for _, cp := range v.configPaths { diff --git a/vendor/github.com/spf13/viper/file_finder.go b/vendor/github.com/spf13/viper/file_finder.go deleted file mode 100644 index d96a1bd22..000000000 --- a/vendor/github.com/spf13/viper/file_finder.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build finder - -package viper - -import ( - "fmt" - - "github.com/sagikazarmark/locafero" -) - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - var names []string - - if v.configType != "" { - names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) - } else { - names = locafero.NameWithExtensions(v.configName, SupportedExts...) - } - - finder := locafero.Finder{ - Paths: v.configPaths, - Names: names, - Type: locafero.FileTypeFile, - } - - results, err := finder.Find(v.fs) - if err != nil { - return "", err - } - - if len(results) == 0 { - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} - } - - return results[0], nil -} diff --git a/vendor/github.com/spf13/viper/finder.go b/vendor/github.com/spf13/viper/finder.go new file mode 100644 index 000000000..9b203ea69 --- /dev/null +++ b/vendor/github.com/spf13/viper/finder.go @@ -0,0 +1,55 @@ +package viper + +import ( + "errors" + + "github.com/spf13/afero" +) + +// WithFinder sets a custom [Finder]. 
+func WithFinder(f Finder) Option { + return optionFunc(func(v *Viper) { + if f == nil { + return + } + + v.finder = f + }) +} + +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} + +// Finders combines multiple finders into one. +func Finders(finders ...Finder) Finder { + return &combinedFinder{finders: finders} +} + +// combinedFinder is a Finder that combines multiple finders. +type combinedFinder struct { + finders []Finder +} + +// Find implements the [Finder] interface. +func (c *combinedFinder) Find(fsys afero.Fs) ([]string, error) { + var results []string + var errs []error + + for _, finder := range c.finders { + if finder == nil { + continue + } + + r, err := finder.Find(fsys) + if err != nil { + errs = append(errs, err) + continue + } + + results = append(results, r...) + } + + return results, errors.Join(errs...) +} diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 3840614fa..d76dfbddd 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -1,22 +1,84 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { - "flake-compat": "flake-compat", + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1724763216, + "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", + "owner": "cachix", + "repo": "devenv", + "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], "nix": "nix", "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] }, "locked": { - "lastModified": 1707817777, - "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", "owner": "cachix", "repo": "devenv", - "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", "type": "github" }, "original": { "owner": "cachix", + "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -37,16 +99,32 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1706830856, - "narHash": 
"sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", + "lastModified": 1722555600, + "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", + "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", "type": "github" }, "original": { @@ -60,11 +138,29 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -82,11 +178,11 @@ ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,53 +191,90 @@ "type": "github" } }, - "lowdown-src": { - "flake": false, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", "type": "github" } }, - "nix": { + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { "inputs": { - "lowdown-src": "lowdown-src", + "flake-compat": [ + "devenv", + "flake-compat" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs-regression": "nixpkgs-regression_2" }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { "owner": 
"domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.21", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", "type": "github" }, "original": { @@ -153,23 +286,33 @@ }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1706550542, - "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", + "lastModified": 1722555339, + "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" }, "original": { - "dir": "lib", "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs-regression_2": { "locked": { "lastModified": 1643052045, "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", @@ -187,27 +330,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1707939175, - "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1724748588, + "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", + "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", "type": "github" }, "original": { @@ -217,13 +376,38 @@ "type": "github" } }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, 
"pre-commit-hooks": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "gitignore": "gitignore", "nixpkgs": [ "devenv", @@ -232,11 +416,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1704725188, - "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "ea96f0c05924341c551a797aaba8126334c505d2", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", "type": "github" }, "original": { @@ -249,7 +433,7 @@ "inputs": { "devenv": "devenv", "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" } }, "systems": { @@ -266,6 +450,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 0230668cf..52ad7d581 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,7 +20,7 @@ default = { languages = { go.enable = true; - go.package = pkgs.go_1_22; + go.package = pkgs.go_1_23; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go deleted file mode 100644 index 8a7b1dbc9..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/decoder.go +++ /dev/null @@ -1,61 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Decoder decodes the contents of b into v. -// It's primarily used for decoding contents of a file into a map[string]any. -type Decoder interface { - Decode(b []byte, v map[string]any) error -} - -const ( - // ErrDecoderNotFound is returned when there is no decoder registered for a format. - ErrDecoderNotFound = encodingError("decoder not found for this format") - - // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. - ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") -) - -// DecoderRegistry can choose an appropriate Decoder based on the provided format. -type DecoderRegistry struct { - decoders map[string]Decoder - - mu sync.RWMutex -} - -// NewDecoderRegistry returns a new, initialized DecoderRegistry. -func NewDecoderRegistry() *DecoderRegistry { - return &DecoderRegistry{ - decoders: make(map[string]Decoder), - } -} - -// RegisterDecoder registers a Decoder for a format. -// Registering a Decoder for an already existing format is not supported. -func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.decoders[format]; ok { - return ErrDecoderFormatAlreadyRegistered - } - - e.decoders[format] = enc - - return nil -} - -// Decode calls the underlying Decoder based on the format. 
-func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]any) error { - e.mu.RLock() - decoder, ok := e.decoders[format] - e.mu.RUnlock() - - if !ok { - return ErrDecoderNotFound - } - - return decoder.Decode(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go deleted file mode 100644 index 659585962..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Encoder encodes the contents of v into a byte representation. -// It's primarily used for encoding a map[string]any into a file format. -type Encoder interface { - Encode(v map[string]any) ([]byte, error) -} - -const ( - // ErrEncoderNotFound is returned when there is no encoder registered for a format. - ErrEncoderNotFound = encodingError("encoder not found for this format") - - // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. - ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") -) - -// EncoderRegistry can choose an appropriate Encoder based on the provided format. -type EncoderRegistry struct { - encoders map[string]Encoder - - mu sync.RWMutex -} - -// NewEncoderRegistry returns a new, initialized EncoderRegistry. -func NewEncoderRegistry() *EncoderRegistry { - return &EncoderRegistry{ - encoders: make(map[string]Encoder), - } -} - -// RegisterEncoder registers an Encoder for a format. -// Registering a Encoder for an already existing format is not supported. -func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.encoders[format]; ok { - return ErrEncoderFormatAlreadyRegistered - } - - e.encoders[format] = enc - - return nil -} - -func (e *EncoderRegistry) Encode(format string, v map[string]any) ([]byte, error) { - e.mu.RLock() - encoder, ok := e.encoders[format] - e.mu.RUnlock() - - if !ok { - return nil, ErrEncoderNotFound - } - - return encoder.Encode(v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go deleted file mode 100644 index e4cde02d7..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/error.go +++ /dev/null @@ -1,7 +0,0 @@ -package encoding - -type encodingError string - -func (e encodingError) Error() string { - return string(e) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go deleted file mode 100644 index d7fa8a1b7..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go +++ /dev/null @@ -1,40 +0,0 @@ -package hcl - -import ( - "bytes" - "encoding/json" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. -// TODO: add printer config to the codec? -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - - // TODO: use printer.Format? Is the trailing newline an issue? 
- - ast, err := hcl.Parse(string(b)) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - - err = printer.Fprint(&buf, ast.Node) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return hcl.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go deleted file mode 100644 index d91cf59d2..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go +++ /dev/null @@ -1,99 +0,0 @@ -package ini - -import ( - "bytes" - "sort" - "strings" - - "github.com/spf13/cast" - "gopkg.in/ini.v1" -) - -// LoadOptions contains all customized options used for load data source(s). -// This type is added here for convenience: this way consumers can import a single package called "ini". -type LoadOptions = ini.LoadOptions - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. -type Codec struct { - KeyDelimiter string - LoadOptions LoadOptions -} - -func (c Codec) Encode(v map[string]any) ([]byte, error) { - cfg := ini.Empty() - ini.PrettyFormat = false - - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - sectionName, keyName := "", key - - lastSep := strings.LastIndex(key, ".") - if lastSep != -1 { - sectionName = key[:(lastSep)] - keyName = key[(lastSep + 1):] - } - - // TODO: is this a good idea? - if sectionName == "default" { - sectionName = "" - } - - cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) - } - - var buf bytes.Buffer - - _, err := cfg.WriteTo(&buf) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c Codec) Decode(b []byte, v map[string]any) error { - cfg := ini.Empty(c.LoadOptions) - - err := cfg.Append(b) - if err != nil { - return err - } - - sections := cfg.Sections() - - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - - deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) - - // set innermost value - deepestMap[key.Name()] = value - } - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go deleted file mode 100644 index 490ab594e..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package ini - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
-func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. -func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go deleted file mode 100644 index e92e5172c..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go +++ /dev/null @@ -1,86 +0,0 @@ -package javaproperties - -import ( - "bytes" - "sort" - "strings" - - "github.com/magiconair/properties" - "github.com/spf13/cast" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. -type Codec struct { - KeyDelimiter string - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. 
- // TODO: drop this feature in v2 - // TODO: make use of the global properties object optional - Properties *properties.Properties -} - -func (c *Codec) Encode(v map[string]any) ([]byte, error) { - if c.Properties == nil { - c.Properties = properties.NewProperties() - } - - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) - if err != nil { - return nil, err - } - } - - var buf bytes.Buffer - - _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Codec) Decode(b []byte, v map[string]any) error { - var err error - c.Properties, err = properties.Load(b, properties.UTF8) - if err != nil { - return err - } - - for _, key := range c.Properties.Keys() { - // ignore existence check: we know it's there - value, _ := c.Properties.Get(key) - - // recursively build nested maps - path := strings.Split(key, c.keyDelimiter()) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go deleted file mode 100644 index 6e1aff223..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package javaproperties - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. 
-func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/features/finder.go b/vendor/github.com/spf13/viper/internal/features/finder.go new file mode 100644 index 000000000..983ea3a9d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/finder.go @@ -0,0 +1,5 @@ +//go:build viper_finder + +package features + +const Finder = true diff --git a/vendor/github.com/spf13/viper/internal/features/finder_default.go b/vendor/github.com/spf13/viper/internal/features/finder_default.go new file mode 100644 index 000000000..89bcb06ee --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/finder_default.go @@ -0,0 +1,5 @@ +//go:build !viper_finder + +package features + +const Finder = false diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go index 8938053b3..828042f29 100644 --- a/vendor/github.com/spf13/viper/logger.go +++ b/vendor/github.com/spf13/viper/logger.go @@ -2,46 +2,9 @@ package viper import ( "context" - - slog "github.com/sagikazarmark/slog-shim" + "log/slog" ) -// Logger is a unified interface for various logging use cases and practices, including: -// - leveled logging -// - structured logging -// -// Deprecated: use `log/slog` instead. -type Logger interface { - // Trace logs a Trace event. - // - // Even more fine-grained information than Debug events. - // Loggers not supporting this level should fall back to Debug. - Trace(msg string, keyvals ...any) - - // Debug logs a Debug event. - // - // A verbose series of information events. - // They are useful when debugging the system. - Debug(msg string, keyvals ...any) - - // Info logs an Info event. - // - // General information about what's happening inside the system. - Info(msg string, keyvals ...any) - - // Warn logs a Warn(ing) event. - // - // Non-critical events that should be looked at. - Warn(msg string, keyvals ...any) - - // Error logs an Error event. - // - // Critical events that require immediate attention. - // Loggers commonly provide Fatal and Panic levels above Error level, - // but exiting and panicking is out of scope for a logging library. - Error(msg string, keyvals ...any) -} - // WithLogger sets a custom logger. func WithLogger(l *slog.Logger) Option { return optionFunc(func(v *Viper) { diff --git a/vendor/github.com/spf13/viper/remote.go b/vendor/github.com/spf13/viper/remote.go new file mode 100644 index 000000000..bdde7de26 --- /dev/null +++ b/vendor/github.com/spf13/viper/remote.go @@ -0,0 +1,256 @@ +package viper + +import ( + "bytes" + "fmt" + "io" + "reflect" + "slices" +) + +// SupportedRemoteProviders are universally supported remote providers. 
+var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} + +func resetRemote() { + SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +type RemoteResponse struct { + Value []byte + Error error +} + +// RemoteConfig is optional, see the remote package. +var RemoteConfig remoteConfigFactory + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. +type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. +func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error. +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// Optional secretKeyring to unencrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} + +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. +func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + + continue + } + + v.kvstore = val + + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + // Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
+func (v *Viper) watchKeyValueConfig() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) + + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index 117c6ac31..2a08074bc 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -12,13 +12,13 @@ package viper import ( "fmt" + "log/slog" "os" "path/filepath" "runtime" "strings" "unicode" - slog "github.com/sagikazarmark/slog-shim" "github.com/spf13/cast" ) @@ -128,15 +128,6 @@ func absPathify(logger *slog.Logger, inPath string) string { return "" } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - func userHomeDir() string { if runtime.GOOS == "windows" { home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index da68d9944..f900e58b1 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -25,29 +25,22 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/mitchellh/mapstructure" - slog "github.com/sagikazarmark/slog-shim" + "github.com/go-viper/mapstructure/v2" "github.com/spf13/afero" "github.com/spf13/cast" "github.com/spf13/pflag" - "github.com/spf13/viper/internal/encoding" - "github.com/spf13/viper/internal/encoding/dotenv" - "github.com/spf13/viper/internal/encoding/hcl" - "github.com/spf13/viper/internal/encoding/ini" - "github.com/spf13/viper/internal/encoding/javaproperties" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" "github.com/spf13/viper/internal/features" ) @@ -63,24 +56,10 @@ func (e ConfigMarshalError) Error() string { var v *Viper -type RemoteResponse struct { - Value []byte - Error error -} - func init() { v = New() } -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package. -var RemoteConfig remoteConfigFactory - // UnsupportedConfigError denotes encountering an unsupported // configuration filetype. type UnsupportedConfigError string @@ -90,24 +69,6 @@ func (str UnsupportedConfigError) Error() string { return fmt.Sprintf("Unsupported Config Type %q", string(str)) } -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. 
-func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error. -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - // ConfigFileNotFoundError denotes failing to find configuration file. type ConfigFileNotFoundError struct { name, locations string @@ -190,6 +151,8 @@ type Viper struct { // The filesystem to read config from. fs afero.Fs + finder Finder + // A set of remote providers to search for the configuration remoteProviders []*defaultRemoteProvider @@ -200,9 +163,6 @@ type Viper struct { configPermissions os.FileMode envPrefix string - // Specific commands for ini parsing - iniLoadOptions ini.LoadOptions - automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool @@ -221,9 +181,13 @@ type Viper struct { logger *slog.Logger - // TODO: should probably be protected with a mutex - encoderRegistry *encoding.EncoderRegistry - decoderRegistry *encoding.DecoderRegistry + encoderRegistry EncoderRegistry + decoderRegistry DecoderRegistry + + decodeHook mapstructure.DecodeHookFunc + + experimentalFinder bool + experimentalBindStruct bool } // New returns an initialized Viper instance. @@ -244,7 +208,13 @@ func New() *Viper { v.typeByDefValue = false v.logger = slog.New(&discardHandler{}) - v.resetEncoding() + codecRegistry := NewCodecRegistry() + + v.encoderRegistry = codecRegistry + v.decoderRegistry = codecRegistry + + v.experimentalFinder = features.Finder + v.experimentalBindStruct = features.BindStruct return v } @@ -280,10 +250,25 @@ type StringReplacer interface { // EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. func EnvKeyReplacer(r StringReplacer) Option { return optionFunc(func(v *Viper) { + if r == nil { + return + } + v.envKeyReplacer = r }) } +// WithDecodeHook sets a default decode hook for mapstructure. +func WithDecodeHook(h mapstructure.DecodeHookFunc) Option { + return optionFunc(func(v *Viper) { + if h == nil { + return + } + + v.decodeHook = h + }) +} + // NewWithOptions creates a new Viper instance. func NewWithOptions(opts ...Option) *Viper { v := New() @@ -292,138 +277,32 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } - v.resetEncoding() - return v } +// SetOptions sets the options on the global Viper instance. +// +// Be careful when using this function: subsequent calls may override options you set. +// It's always better to use a local Viper instance. +func SetOptions(opts ...Option) { + for _, opt := range opts { + opt.apply(v) + } +} + // Reset is intended for testing, will reset all to default settings. // In the public interface for the viper package so applications // can use it in their testing as well. func Reset() { v = New() SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} -} - -// TODO: make this lazy initialization instead. 
-func (v *Viper) resetEncoding() { - encoderRegistry := encoding.NewEncoderRegistry() - decoderRegistry := encoding.NewDecoderRegistry() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } - - { - codec := ini.Codec{ - KeyDelimiter: v.keyDelim, - LoadOptions: v.iniLoadOptions, - } - - encoderRegistry.RegisterEncoder("ini", codec) - decoderRegistry.RegisterDecoder("ini", codec) - } - - { - codec := &javaproperties.Codec{ - KeyDelimiter: v.keyDelim, - } - - encoderRegistry.RegisterEncoder("properties", codec) - decoderRegistry.RegisterDecoder("properties", codec) - - encoderRegistry.RegisterEncoder("props", codec) - decoderRegistry.RegisterDecoder("props", codec) - - encoderRegistry.RegisterEncoder("prop", codec) - decoderRegistry.RegisterDecoder("prop", codec) - } - - { - codec := &dotenv.Codec{} - - encoderRegistry.RegisterEncoder("dotenv", codec) - decoderRegistry.RegisterDecoder("dotenv", codec) - - encoderRegistry.RegisterEncoder("env", codec) - decoderRegistry.RegisterDecoder("env", codec) - } - v.encoderRegistry = encoderRegistry - v.decoderRegistry = decoderRegistry -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string + resetRemote() } // SupportedExts are universally supported extensions. var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} -// SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} - // OnConfigChange sets the event handler that is called when a config file changes. 
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } @@ -574,90 +453,20 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "AddConfigPath")) + } + if in != "" { absin := absPathify(v.logger, in) v.logger.Info("adding path to search paths", "path", absin) - if !stringInSlice(absin, v.configPaths) { + if !slices.Contains(v.configPaths, absin) { v.configPaths = append(v.configPaths, absin) } } } -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. 
-func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - // searchMap recursively searches for a value for path in source map. // Returns nil if not found. // Note: This assumes that the path entries and map keys are lower cased. @@ -965,6 +774,7 @@ func (v *Viper) Sub(key string) *Viper { subv.automaticEnvApplied = v.automaticEnvApplied subv.envPrefix = v.envPrefix subv.envKeyReplacer = v.envKeyReplacer + subv.keyDelim = v.keyDelim subv.config = cast.ToStringMap(data) return subv } @@ -1006,6 +816,13 @@ func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } +// GetUint8 returns the value associated with the key as an unsigned integer. +func GetUint8(key string) uint8 { return v.GetUint8(key) } + +func (v *Viper) GetUint8(key string) uint8 { + return cast.ToUint8(v.Get(key)) +} + // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } @@ -1105,7 +922,7 @@ func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { } func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) + return decode(v.Get(key), v.defaultDecoderConfig(rawVal, opts...)) } // Unmarshal unmarshals the config into a Struct. Make sure that the tags @@ -1117,7 +934,7 @@ func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { keys := v.AllKeys() - if features.BindStruct { + if v.experimentalBindStruct { // TODO: make this optional? structKeys, err := v.decodeStructKeys(rawVal, opts...) if err != nil { @@ -1128,13 +945,13 @@ func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { } // TODO: struct keys should be enough? - return decode(v.getSettings(keys), defaultDecoderConfig(rawVal, opts...)) + return decode(v.getSettings(keys), v.defaultDecoderConfig(rawVal, opts...)) } func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { var structKeyMap map[string]any - err := decode(input, defaultDecoderConfig(&structKeyMap, opts...)) + err := decode(input, v.defaultDecoderConfig(&structKeyMap, opts...)) if err != nil { return nil, err } @@ -1151,22 +968,54 @@ func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]stri // defaultDecoderConfig returns default mapstructure.DecoderConfig with support // of time.Duration values & string slices. 
-func defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { +func (v *Viper) defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { + decodeHook := v.decodeHook + if decodeHook == nil { + decodeHook = mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + // mapstructure.StringToSliceHookFunc(","), + stringToWeakSliceHookFunc(","), + ) + } + c := &mapstructure.DecoderConfig{ Metadata: nil, - Result: output, WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), + DecodeHook: decodeHook, } + for _, opt := range opts { opt(c) } + + // Do not allow overwriting the output + c.Result = output + return c } +// As of mapstructure v2.0.0 StringToSliceHookFunc checks if the return type is a string slice. +// This function removes that check. +// TODO: implement a function that checks if the value can be converted to the return type and use it instead. +func stringToWeakSliceHookFunc(sep string) mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + // decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. func decode(input any, config *mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) @@ -1183,12 +1032,12 @@ func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { } func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - config := defaultDecoderConfig(rawVal, opts...) + config := v.defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true keys := v.AllKeys() - if features.BindStruct { + if v.experimentalBindStruct { // TODO: make this optional? structKeys, err := v.decodeStructKeys(rawVal, opts...) if err != nil { @@ -1638,7 +1487,7 @@ func (v *Viper) ReadInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1669,7 +1518,7 @@ func (v *Viper) MergeInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1686,6 +1535,10 @@ func (v *Viper) MergeInConfig() error { func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + v.config = make(map[string]any) return v.unmarshalReader(in, v.config) } @@ -1694,6 +1547,10 @@ func (v *Viper) ReadConfig(in io.Reader) error { func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } func (v *Viper) MergeConfig(in io.Reader) error { + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + cfg := make(map[string]any) if err := v.unmarshalReader(in, cfg); err != nil { return err @@ -1742,6 +1599,19 @@ func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } +// WriteConfigTo writes current configuration to an [io.Writer]. 
+func WriteConfigTo(w io.Writer) error { return v.WriteConfigTo(w) } + +func (v *Viper) WriteConfigTo(w io.Writer) error { + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + return v.marshalWriter(w, format) +} + // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } @@ -1768,7 +1638,7 @@ func (v *Viper) writeConfig(filename string, force bool) error { return fmt.Errorf("config type could not be determined for %s", filename) } - if !stringInSlice(configType, SupportedExts) { + if !slices.Contains(SupportedExts, configType) { return UnsupportedConfigError(configType) } if v.config == nil { @@ -1795,12 +1665,20 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": - err := v.decoderRegistry.Decode(format, buf.Bytes(), c) - if err != nil { - return ConfigParseError{err} - } + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + decoder, err := v.decoderRegistry.Decoder(format) + if err != nil { + return ConfigParseError{err} + } + + err = decoder.Decode(buf.Bytes(), c) + if err != nil { + return ConfigParseError{err} } insensitiviseMap(c) @@ -1808,20 +1686,24 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { } // Marshal a map into Writer. -func (v *Viper) marshalWriter(f afero.File, configType string) error { +func (v *Viper) marshalWriter(w io.Writer, configType string) error { c := v.AllSettings() - switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": - b, err := v.encoderRegistry.Encode(configType, c) - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } + encoder, err := v.encoderRegistry.Encoder(configType) + if err != nil { + return ConfigMarshalError{err} } + + b, err := encoder.Encode(c) + if err != nil { + return ConfigMarshalError{err} + } + + _, err = w.Write(b) + if err != nil { + return ConfigMarshalError{err} + } + return nil } @@ -1953,106 +1835,6 @@ func mergeMaps(src, tgt map[string]any, itgt map[any]any) { } } -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. 
-func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfig() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) - - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - // AllKeys returns all keys holding a value, regardless of where they are set. // Nested keys are returned with a v.keyDelim separator. func AllKeys() []string { return v.AllKeys() } @@ -2174,6 +1956,10 @@ func (v *Viper) SetFs(fs afero.Fs) { func SetConfigName(in string) { v.SetConfigName(in) } func (v *Viper) SetConfigName(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "SetConfigName")) + } + if in != "" { v.configName = in v.configFile = "" @@ -2197,13 +1983,6 @@ func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } -// IniLoadOptions sets the load options for ini parsing. 
-func IniLoadOptions(in ini.LoadOptions) Option { - return optionFunc(func(v *Viper) { - v.iniLoadOptions = in - }) -} - func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go index 374bb0d24..ce57a4c21 100644 --- a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -29,7 +29,7 @@ func run(pass *analysis.Pass) (interface{}, error) { callExpr := node.(*ast.CallExpr) if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" { if err := checkForHostPortConstruction(callExpr); err != nil { - pass.Reportf(node.Pos(), err.Error()) + pass.Reportf(node.Pos(), "%s", err.Error()) } } }) @@ -52,7 +52,7 @@ func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result // getStringLiteral returns the value at a position if it's a string literal. func getStringLiteral(args []ast.Expr, pos int) (string, bool) { - if len(args) < pos + 1 { + if len(args) < pos+1 { return "", false } @@ -72,9 +72,9 @@ func getStringLiteral(args []ast.Expr, pos int) (string, bool) { // essentially scheme://%s:, or scheme://user:pass@%s:. // // Matching requirements: -// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) -// - A format string substitution in the host portion, preceded by an optional username/password@ -// - A colon indicating a port will be specified +// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) +// - A format string substitution in the host portion, preceded by an optional username/password@ +// - A colon indicating a port will be specified func checkForHostPortConstruction(sprintf *ast.CallExpr) error { fs, ok := getStringLiteral(sprintf.Args, 0) if !ok { @@ -93,4 +93,4 @@ func checkForHostPortConstruction(sprintf *ast.CallExpr) error { } return nil -} \ No newline at end of file +} diff --git a/vendor/github.com/tdakkota/asciicheck/asciicheck.go b/vendor/github.com/tdakkota/asciicheck/asciicheck.go index 690728022..2ec141ec3 100644 --- a/vendor/github.com/tdakkota/asciicheck/asciicheck.go +++ b/vendor/github.com/tdakkota/asciicheck/asciicheck.go @@ -3,47 +3,100 @@ package asciicheck import ( "fmt" "go/ast" + "go/token" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" ) func NewAnalyzer() *analysis.Analyzer { return &analysis.Analyzer{ - Name: "asciicheck", - Doc: "checks that all code identifiers does not have non-ASCII symbols in the name", - Run: run, + Name: "asciicheck", + Doc: "checks that all code identifiers does not have non-ASCII symbols in the name", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, } } -func run(pass *analysis.Pass) (interface{}, error) { - for _, file := range pass.Files { - alreadyViewed := map[*ast.Object]struct{}{} - ast.Inspect( - file, func(node ast.Node) bool { - cb(pass, node, alreadyViewed) - return true - }, - ) +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.File)(nil), + (*ast.ImportSpec)(nil), + (*ast.TypeSpec)(nil), + (*ast.ValueSpec)(nil), + (*ast.FuncDecl)(nil), + (*ast.StructType)(nil), + (*ast.FuncType)(nil), + (*ast.InterfaceType)(nil), + (*ast.LabeledStmt)(nil), + (*ast.AssignStmt)(nil), } + 
inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.File: + checkIdent(pass, n.Name) + case *ast.ImportSpec: + checkIdent(pass, n.Name) + case *ast.TypeSpec: + checkIdent(pass, n.Name) + checkFieldList(pass, n.TypeParams) + case *ast.ValueSpec: + for _, name := range n.Names { + checkIdent(pass, name) + } + case *ast.FuncDecl: + checkIdent(pass, n.Name) + checkFieldList(pass, n.Recv) + case *ast.StructType: + checkFieldList(pass, n.Fields) + case *ast.FuncType: + checkFieldList(pass, n.TypeParams) + checkFieldList(pass, n.Params) + checkFieldList(pass, n.Results) + case *ast.InterfaceType: + checkFieldList(pass, n.Methods) + case *ast.LabeledStmt: + checkIdent(pass, n.Label) + case *ast.AssignStmt: + if n.Tok == token.DEFINE { + for _, expr := range n.Lhs { + if ident, ok := expr.(*ast.Ident); ok { + checkIdent(pass, ident) + } + } + } + } + }) return nil, nil } -func cb(pass *analysis.Pass, n ast.Node, m map[*ast.Object]struct{}) { - if v, ok := n.(*ast.Ident); ok { - if _, ok := m[v.Obj]; ok { - return - } else { - m[v.Obj] = struct{}{} - } +func checkIdent(pass *analysis.Pass, v *ast.Ident) { + if v == nil { + return + } - ch, ascii := isASCII(v.Name) - if !ascii { - pass.Report( - analysis.Diagnostic{ - Pos: v.Pos(), - Message: fmt.Sprintf("identifier \"%s\" contain non-ASCII character: %#U", v.Name, ch), - }, - ) + ch, ascii := isASCII(v.Name) + if !ascii { + pass.Report( + analysis.Diagnostic{ + Pos: v.Pos(), + Message: fmt.Sprintf("identifier \"%s\" contain non-ASCII character: %#U", v.Name, ch), + }, + ) + } +} + +func checkFieldList(pass *analysis.Pass, f *ast.FieldList) { + if f == nil { + return + } + for _, f := range f.List { + for _, name := range f.Names { + checkIdent(pass, name) } } } diff --git a/vendor/github.com/tetafro/godot/.goreleaser.yml b/vendor/github.com/tetafro/godot/.goreleaser.yml index c0fc2b6b1..2f0c2466a 100644 --- a/vendor/github.com/tetafro/godot/.goreleaser.yml +++ b/vendor/github.com/tetafro/godot/.goreleaser.yml @@ -1,3 +1,5 @@ +version: 2 + builds: - dir: ./cmd/godot checksum: diff --git a/vendor/github.com/tetafro/godot/README.md b/vendor/github.com/tetafro/godot/README.md index 6b2e530b9..15a85778e 100644 --- a/vendor/github.com/tetafro/godot/README.md +++ b/vendor/github.com/tetafro/godot/README.md @@ -36,6 +36,7 @@ defaults are used: # Which comments to check: # declarations - for top level declaration comments (default); # toplevel - for top level comments; +# noinline - for all except inline comments; # all - for all comments. scope: declarations diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go index de3d06e10..fda0619f4 100644 --- a/vendor/github.com/tetafro/godot/getters.go +++ b/vendor/github.com/tetafro/godot/getters.go @@ -6,6 +6,7 @@ import ( "go/ast" "go/token" "os" + "path/filepath" "regexp" "strings" ) @@ -36,36 +37,22 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { file: file, } - var err error - // Read original file. This is necessary for making a replacements for // inline comments. I couldn't find a better way to get original line // with code and comment without reading the file. Function `Format` // from "go/format" won't help here if the original file is not gofmt-ed. - pf.lines, err = readFile(file, fset) - if err != nil { - return nil, fmt.Errorf("read file: %w", err) - } - // Dirty hack. For some cases Go generates temporary files during - // compilation process if there is a cgo block in the source file. 
Some of - // these temporary files are just copies of original source files but with - // new generated comments at the top. Because of them the content differs - // from AST. For some reason it differs only in golangci-lint. I failed to - // find out the exact description of the process, so let's just skip files - // generated by cgo. - if isCgoGenerated(pf.lines) { - return nil, errUnsuitableInput + filename := getFilename(fset, file) + + if !strings.HasSuffix(filename, ".go") { + return nil, errEmptyInput } - // Check consistency to avoid checking slice indexes in each function. - // Note that `PositionFor` is used with `adjusted=false` to skip `//line` - // directives that can set references to other files (e.g. templates) - // instead of the real ones, and break consistency here. - // Issue: https://github.com/tetafro/godot/issues/32 - lastComment := pf.file.Comments[len(pf.file.Comments)-1] - if p := pf.fset.PositionFor(lastComment.End(), false); len(pf.lines) < p.Line { - return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) + var err error + + pf.lines, err = readFile(filename) + if err != nil { + return nil, fmt.Errorf("read file: %w", err) } return &pf, nil @@ -79,6 +66,9 @@ func (pf *parsedFile) getComments(scope Scope, exclude []*regexp.Regexp) []comme case AllScope: // All comments comments = pf.getAllComments(exclude) + case NoInlineScope: + // All except inline comments + comments = pf.getNoInline(exclude) case TopLevelScope: // All top level comments and comments from the inside // of top level blocks @@ -186,6 +176,34 @@ func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment return comments } +// getNoInline gets all except inline comments. +func (pf *parsedFile) getNoInline(exclude []*regexp.Regexp) []comment { + var comments []comment //nolint:prealloc + for _, c := range pf.file.Comments { + if c == nil || len(c.List) == 0 { + continue + } + firstLine := pf.fset.Position(c.Pos()).Line + lastLine := pf.fset.Position(c.End()).Line + + c := comment{ + lines: pf.lines[firstLine-1 : lastLine], + start: pf.fset.Position(c.List[0].Slash), + text: getText(c, exclude), + } + + // Skip inline + if len(c.lines) == 1 { + before := c.lines[0][:c.start.Column-1] + if len(strings.TrimSpace(before)) > 0 { + continue + } + } + comments = append(comments, c) + } + return comments +} + // getAllComments gets every single comment from the file. func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { var comments []comment //nolint:prealloc @@ -244,12 +262,12 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { } // readFile reads file and returns its lines as strings. 
-func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { - fname := fset.File(file.Package) - f, err := os.ReadFile(fname.Name()) +func readFile(filename string) ([]string, error) { + f, err := os.ReadFile(filepath.Clean(filename)) if err != nil { return nil, err //nolint:wrapcheck } + return strings.Split(string(f), "\n"), nil } @@ -275,11 +293,11 @@ func matchAny(s string, rr []*regexp.Regexp) bool { return false } -func isCgoGenerated(lines []string) bool { - for i := range lines { - if strings.Contains(lines[i], "Code generated by cmd/cgo") { - return true - } +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename } - return false + + return filename } diff --git a/vendor/github.com/tetafro/godot/settings.go b/vendor/github.com/tetafro/godot/settings.go index b71bf5d58..d770cbcf7 100644 --- a/vendor/github.com/tetafro/godot/settings.go +++ b/vendor/github.com/tetafro/godot/settings.go @@ -24,6 +24,8 @@ const ( DeclScope Scope = "declarations" // TopLevelScope is for all top level comments. TopLevelScope Scope = "toplevel" + // NoInlineScope is for all except inline comments. + NoInlineScope Scope = "noinline" // AllScope is for all comments. AllScope Scope = "all" ) diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go index 21bb485b4..ae860d728 100644 --- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go +++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -23,7 +23,7 @@ var Analyzer = &analysis.Analyzer{ } const ( - Doc = "bodyclose checks whether HTTP response body is closed successfully" + Doc = "checks whether HTTP response body is closed successfully" nethttpPath = "net/http" closeMethod = "Close" @@ -114,6 +114,18 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { if len(*call.Referrers()) == 0 { return true } + + if instr, ok := b.Instrs[i].(*ssa.Call); ok { + // httptest.ResponseRecorder is not needed closing the response body because no-op. 
+ if callee := instr.Call.StaticCallee(); callee != nil && callee.Name() == "Result" { + if callee.Pkg != nil && callee.Pkg.Pkg.Name() == "httptest" { + if recv := callee.Signature.Recv(); recv != nil && recv.Type().String() == "*net/http/httptest.ResponseRecorder" { + return false + } + } + } + } + cRefs := *call.Referrers() for _, cRef := range cRefs { val, ok := r.getResVal(cRef) @@ -149,6 +161,22 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { return r.calledInFunc(f, called) } + // Case when calling Close() from struct field or method + if s, ok := aref.(*ssa.Store); ok { + if f, ok := s.Addr.(*ssa.FieldAddr); ok { + for _, bRef := range f.Block().Instrs { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + for _, ccall := range *bOp.Referrers() { + if r.isCloseCall(ccall) { + return false + } + } + } + } + } } case *ssa.Call, *ssa.Defer: // Indirect function call // Hacky way to extract CommonCall @@ -195,6 +223,34 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } } + case *ssa.Phi: // Called in the higher-level block + if resRef.Referrers() == nil { + return true + } + + bRefs := *resRef.Referrers() + + for _, bRef := range bRefs { + switch instr := bRef.(type) { + case *ssa.FieldAddr: + bRefs := *instr.Referrers() + for _, bRef := range bRefs { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + if len(*bOp.Referrers()) == 0 { + return true + } + ccalls := *bOp.Referrers() + for _, ccall := range ccalls { + if r.isCloseCall(ccall) { + return false + } + } + } + } + } } } } @@ -207,7 +263,9 @@ func (r *runner) getReqCall(instr ssa.Instruction) (*ssa.Call, bool) { if !ok { return nil, false } - if !strings.Contains(call.Type().String(), r.resTyp.String()) { + callType := call.Type().String() + if !strings.Contains(callType, r.resTyp.String()) || + strings.Contains(callType, "net/http.ResponseController") { return nil, false } return call, true diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go index 2c1b2d20b..127f7efd8 100644 --- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go +++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -44,6 +44,10 @@ type WrapcheckConfig struct { // list to your config. IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` + // ExtraIgnoreSigs defines an additional list of signatures to ignore, on + // top of IgnoreSigs. + ExtraIgnoreSigs []string `mapstructure:"extraIgnoreSigs" yaml:"extraIgnoreSigs"` + // IgnoreSigRegexps defines a list of regular expressions which if matched // to the signature of the function call returning the error, will be ignored. 
This // allows you to specify functions that wrapcheck will not report as @@ -276,7 +280,7 @@ func reportUnwrapped( // Check for ignored signatures fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() - if contains(cfg.IgnoreSigs, fnSig) { + if contains(cfg.IgnoreSigs, fnSig) || contains(cfg.ExtraIgnoreSigs, fnSig) { return } else if containsMatch(regexpsSig, fnSig) { return diff --git a/vendor/github.com/ultraware/funlen/.golangci.yml b/vendor/github.com/ultraware/funlen/.golangci.yml new file mode 100644 index 000000000..600bef78e --- /dev/null +++ b/vendor/github.com/ultraware/funlen/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md index af2187694..8bbe7eab6 100644 --- a/vendor/github.com/ultraware/funlen/README.md +++ b/vendor/github.com/ultraware/funlen/README.md @@ -16,6 +16,26 @@ The default values are used internally, but might to be adjusted for your specif Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. +## Configuration + +Available configuration options: + +```yaml +linters-settings: + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 60 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 60 + # Ignore comments when counting lines. + # Default false + ignore-comments: false +``` + # Exclude for tests golangci-lint offers a way to exclude linters in certain cases. More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. diff --git a/vendor/github.com/ultraware/funlen/funlen.go b/vendor/github.com/ultraware/funlen/funlen.go new file mode 100644 index 000000000..9838f7cc4 --- /dev/null +++ b/vendor/github.com/ultraware/funlen/funlen.go @@ -0,0 +1,115 @@ +package funlen + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +const ( + defaultLineLimit = 60 + defaultStmtLimit = 40 +) + +func NewAnalyzer(lineLimit int, stmtLimit int, ignoreComments bool) *analysis.Analyzer { + if lineLimit == 0 { + lineLimit = defaultLineLimit + } + + if stmtLimit == 0 { + stmtLimit = defaultStmtLimit + } + + return &analysis.Analyzer{ + Name: "funlen", + Doc: "Checks for long functions.", + URL: "https://github.com/ultraware/funlen", + Run: func(pass *analysis.Pass) (any, error) { + run(pass, lineLimit, stmtLimit, ignoreComments) + return nil, nil + }, + } +} + +func run(pass *analysis.Pass, lineLimit int, stmtLimit int, ignoreComments bool) { + for _, file := range pass.Files { + cmap := ast.NewCommentMap(pass.Fset, file, file.Comments) + + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. 
cgo + continue + } + + if stmtLimit > 0 { + if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { + pass.Reportf(decl.Name.Pos(), "Function '%s' has too many statements (%d > %d)", decl.Name.Name, stmts, stmtLimit) + continue + } + } + + if lineLimit > 0 { + if lines := getLines(pass.Fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { + pass.Reportf(decl.Name.Pos(), "Function '%s' is too long (%d > %d)", decl.Name.Name, lines, lineLimit) + } + } + } + } +} + +func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { + lineCount := fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 + + if !ignoreComments { + return lineCount + } + + var commentCount int + + for _, c := range cmap.Comments() { + // If the CommentGroup's lines are inside the function + // count how many comments are in the CommentGroup + if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && + (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { + commentCount += len(c.List) + } + } + + return lineCount - commentCount +} + +func parseStmts(s []ast.Stmt) (total int) { + for _, v := range s { + total++ + switch stmt := v.(type) { + case *ast.BlockStmt: + total += parseStmts(stmt.List) - 1 + case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, + *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + total += parseBodyListStmts(stmt) + case *ast.CaseClause: + total += parseStmts(stmt.Body) + case *ast.AssignStmt: + total += checkInlineFunc(stmt.Rhs[0]) + case *ast.GoStmt: + total += checkInlineFunc(stmt.Call.Fun) + case *ast.DeferStmt: + total += checkInlineFunc(stmt.Call.Fun) + } + } + return +} + +func checkInlineFunc(stmt ast.Expr) int { + if block, ok := stmt.(*ast.FuncLit); ok { + return parseStmts(block.Body.List) + } + return 0 +} + +func parseBodyListStmts(t any) int { + i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() + return parseStmts(i.([]ast.Stmt)) +} diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go deleted file mode 100644 index b68ddb926..000000000 --- a/vendor/github.com/ultraware/funlen/main.go +++ /dev/null @@ -1,124 +0,0 @@ -package funlen - -import ( - "fmt" - "go/ast" - "go/token" - "reflect" -) - -const ( - defaultLineLimit = 60 - defaultStmtLimit = 40 -) - -// Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { - if lineLimit == 0 { - lineLimit = defaultLineLimit - } - if stmtLimit == 0 { - stmtLimit = defaultStmtLimit - } - - cmap := ast.NewCommentMap(fset, file, file.Comments) - - var msgs []Message - for _, f := range file.Decls { - decl, ok := f.(*ast.FuncDecl) - if !ok || decl.Body == nil { // decl.Body can be nil for e.g. 
cgo - continue - } - - if stmtLimit > 0 { - if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { - msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit)) - continue - } - } - - if lineLimit > 0 { - if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { - msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) - } - } - } - - return msgs -} - -// Message contains a message -type Message struct { - Pos token.Position - Message string -} - -func makeLineMessage(fset *token.FileSet, funcInfo *ast.Ident, lines, lineLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' is too long (%d > %d)\n", funcInfo.Name, lines, lineLimit), - } -} - -func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' has too many statements (%d > %d)\n", funcInfo.Name, stmts, stmtLimit), - } -} - -func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer - var lineCount int - var commentCount int - - lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 - - if !ignoreComments { - return lineCount - } - - for _, c := range cmap.Comments() { - // If the CommenGroup's lines are inside the function - // count how many comments are in the CommentGroup - if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && - (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { - commentCount += len(c.List) - } - } - - return lineCount - commentCount -} - -func parseStmts(s []ast.Stmt) (total int) { - for _, v := range s { - total++ - switch stmt := v.(type) { - case *ast.BlockStmt: - total += parseStmts(stmt.List) - 1 - case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, - *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - total += parseBodyListStmts(stmt) - case *ast.CaseClause: - total += parseStmts(stmt.Body) - case *ast.AssignStmt: - total += checkInlineFunc(stmt.Rhs[0]) - case *ast.GoStmt: - total += checkInlineFunc(stmt.Call.Fun) - case *ast.DeferStmt: - total += checkInlineFunc(stmt.Call.Fun) - } - } - return -} - -func checkInlineFunc(stmt ast.Expr) int { - if block, ok := stmt.(*ast.FuncLit); ok { - return parseStmts(block.Body.List) - } - return 0 -} - -func parseBodyListStmts(t interface{}) int { - i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() - return parseStmts(i.([]ast.Stmt)) -} diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md index f99ecce36..660c13d78 100644 --- a/vendor/github.com/ultraware/whitespace/README.md +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -4,6 +4,6 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide -To install as a standalone linter, run `go install github.com/ultraware/whitespace`. +To install as a standalone linter, run `go install github.com/ultraware/whitespace/cmd/whitespace@latest`. Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. 
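The funlen change above replaces the old `Run`/`Message` API with a single `NewAnalyzer(lineLimit, stmtLimit, ignoreComments)` constructor that returns a standard `*analysis.Analyzer`. A minimal sketch of how such an analyzer could be driven standalone follows; the `singlechecker` wrapper is illustrative only and not part of this patch.

```go
// Hypothetical standalone driver for the rewritten funlen analyzer.
package main

import (
	"github.com/ultraware/funlen"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// Zero values fall back to the package defaults (60 lines / 40 statements),
	// as handled inside NewAnalyzer in the vendored source above.
	singlechecker.Main(funlen.NewAnalyzer(0, 0, false))
}
```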
diff --git a/vendor/github.com/ultraware/whitespace/whitespace.go b/vendor/github.com/ultraware/whitespace/whitespace.go index 350e9b7e4..44e68124c 100644 --- a/vendor/github.com/ultraware/whitespace/whitespace.go +++ b/vendor/github.com/ultraware/whitespace/whitespace.go @@ -9,53 +9,8 @@ import ( "golang.org/x/tools/go/analysis" ) -// MessageType describes what should happen to fix the warning. -type MessageType uint8 - -// List of MessageTypes. -const ( - MessageTypeRemove MessageType = iota + 1 - MessageTypeAdd -) - -// RunningMode describes the mode the linter is run in. This can be either -// native or golangci-lint. -type RunningMode uint8 - -const ( - RunningModeNative RunningMode = iota - RunningModeGolangCI -) - -// Message contains a message and diagnostic information. -type Message struct { - // Diagnostic is what position the diagnostic should be put at. This isn't - // always the same as the fix start, f.ex. when we fix trailing newlines we - // put the diagnostic at the right bracket but we fix between the end of the - // last statement and the bracket. - Diagnostic token.Pos - - // FixStart is the span start of the fix. - FixStart token.Pos - - // FixEnd is the span end of the fix. - FixEnd token.Pos - - // LineNumbers represent the actual line numbers in the file. This is set - // when finding the diagnostic to make it easier to suggest fixes in - // golangci-lint. - LineNumbers []int - - // MessageType represents the type of message it is. - MessageType MessageType - - // Message is the diagnostic to show. - Message string -} - // Settings contains settings for edge-cases. type Settings struct { - Mode RunningMode MultiIf bool MultiFunc bool } @@ -86,47 +41,24 @@ func flags(settings *Settings) flag.FlagSet { return *flags } -func Run(pass *analysis.Pass, settings *Settings) []Message { - messages := []Message{} - +func Run(pass *analysis.Pass, settings *Settings) { for _, file := range pass.Files { filename := pass.Fset.Position(file.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { continue } fileMessages := runFile(file, pass.Fset, *settings) - if settings.Mode == RunningModeGolangCI { - messages = append(messages, fileMessages...) 
- continue - } - for _, message := range fileMessages { - pass.Report(analysis.Diagnostic{ - Pos: message.Diagnostic, - Category: "whitespace", - Message: message.Message, - SuggestedFixes: []analysis.SuggestedFix{ - { - TextEdits: []analysis.TextEdit{ - { - Pos: message.FixStart, - End: message.FixEnd, - NewText: []byte("\n"), - }, - }, - }, - }, - }) + pass.Report(message) } } - - return messages } -func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { - var messages []Message +func runFile(file *ast.File, fset *token.FileSet, settings Settings) []analysis.Diagnostic { + var messages []analysis.Diagnostic for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -146,7 +78,7 @@ func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { type visitor struct { comments []*ast.CommentGroup fset *token.FileSet - messages []Message + messages []analysis.Diagnostic wantNewline map[*ast.BlockStmt]bool settings Settings } @@ -180,13 +112,16 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { startMsg := checkStart(v.fset, opening, first) if wantNewline && startMsg == nil && len(stmt.List) >= 1 { - v.messages = append(v.messages, Message{ - Diagnostic: opening, - FixStart: stmt.List[0].Pos(), - FixEnd: stmt.List[0].Pos(), - LineNumbers: []int{v.fset.PositionFor(stmt.List[0].Pos(), false).Line}, - MessageType: MessageTypeAdd, - Message: "multi-line statement should be followed by a newline", + v.messages = append(v.messages, analysis.Diagnostic{ + Pos: opening, + Message: "multi-line statement should be followed by a newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: stmt.List[0].Pos(), + End: stmt.List[0].Pos(), + NewText: []byte("\n"), + }}, + }}, }) } else if !wantNewline && startMsg != nil { v.messages = append(v.messages, *startMsg) @@ -209,7 +144,7 @@ func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { } func posLine(fset *token.FileSet, pos token.Pos) int { - return fset.PositionFor(pos, false).Line + return fset.Position(pos).Line } func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { @@ -256,52 +191,46 @@ func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.B return openingPos, first, last } -func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { +func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *analysis.Diagnostic { if first == nil { return nil } if posLine(fset, start)+1 < posLine(fset, first.Pos()) { - return &Message{ - Diagnostic: start, - FixStart: start, - FixEnd: first.Pos(), - LineNumbers: linesBetween(fset, start, first.Pos()), - MessageType: MessageTypeRemove, - Message: "unnecessary leading newline", + return &analysis.Diagnostic{ + Pos: start, + Message: "unnecessary leading newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: first.Pos(), + NewText: []byte("\n"), + }}, + }}, } } return nil } -func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { +func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *analysis.Diagnostic { if last == nil { return nil } if posLine(fset, end)-1 > posLine(fset, last.End()) { - return &Message{ - Diagnostic: end, - FixStart: last.End(), - FixEnd: end, - LineNumbers: linesBetween(fset, last.End(), end), - MessageType: MessageTypeRemove, - Message: "unnecessary trailing newline", + return 
&analysis.Diagnostic{ + Pos: end, + Message: "unnecessary trailing newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: last.End(), + End: end, + NewText: []byte("\n"), + }}, + }}, } } return nil } - -func linesBetween(fset *token.FileSet, a, b token.Pos) []int { - lines := []int{} - aPosition := fset.PositionFor(a, false) - bPosition := fset.PositionFor(b, false) - - for i := aPosition.Line + 1; i < bPosition.Line; i++ { - lines = append(lines, i) - } - - return lines -} diff --git a/vendor/github.com/uudashr/gocognit/README.md b/vendor/github.com/uudashr/gocognit/README.md index c15f61bcf..415e81e73 100644 --- a/vendor/github.com/uudashr/gocognit/README.md +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -147,14 +147,14 @@ The following structures receive a nesting increment commensurate with their nes ## Installation -``` -$ go install github.com/uudashr/gocognit/cmd/gocognit@latest +```shell +go install github.com/uudashr/gocognit/cmd/gocognit@latest ``` or -``` -$ go get github.com/uudashr/gocognit/cmd/gocognit +```shell +go get github.com/uudashr/gocognit/cmd/gocognit ``` ## Usage @@ -169,14 +169,17 @@ Usage: Flags: - -over N show functions with complexity > N only - and return exit code 1 if the output is non-empty - -top N show the top N most complex functions only - -avg show the average complexity over all functions, - not depending on whether -over or -top are set - -json encode the output as JSON - -f format string the format to use - (default "{{.PkgName}}.{{.FuncName}}:{{.Complexity}}:{{.Pos}}") + -over N show functions with complexity > N only + and return exit code 1 if the output is non-empty + -top N show the top N most complex functions only + -avg show the average complexity over all functions, + not depending on whether -over or -top are set + -test indicates whether test files should be included + -json encode the output as JSON + -d enable diagnostic output + -f format string the format to use + (default "{{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}}") + -ignore expr ignore files matching the given regexp The (default) output fields for each line are: @@ -191,10 +194,24 @@ or equal to The struct being passed to the template is: type Stat struct { - PkgName string - FuncName string - Complexity int - Pos token.Position + PkgName string + FuncName string + Complexity int + Pos token.Position + Diagnostics []Diagnostics + } + + type Diagnostic struct { + Inc string + Nesting int + Text string + Pos DiagnosticPosition + } + + type DiagnosticPosition struct { + Offset int + Line int + Column int } ``` @@ -223,6 +240,76 @@ func IgnoreMe() { } ``` +## Diagnostic +To understand how the complexity are calculated, we can enable the diagnostic by using `-d` flag. + +Example: +```shell +$ gocognit -json -d . +``` + +It will show the diagnostic output in JSON format +
+ +JSON Output + +```json +[ + { + "PkgName": "prime", + "FuncName": "SumOfPrimes", + "Complexity": 7, + "Pos": { + "Filename": "prime.go", + "Offset": 15, + "Line": 3, + "Column": 1 + }, + "Diagnostics": [ + { + "Inc": 1, + "Text": "for", + "Pos": { + "Offset": 69, + "Line": 7, + "Column": 2 + } + }, + { + "Inc": 2, + "Nesting": 1, + "Text": "for", + "Pos": { + "Offset": 104, + "Line": 8, + "Column": 3 + } + }, + { + "Inc": 3, + "Nesting": 2, + "Text": "if", + "Pos": { + "Offset": 152, + "Line": 9, + "Column": 4 + } + }, + { + "Inc": 1, + "Text": "continue", + "Pos": { + "Offset": 190, + "Line": 10, + "Column": 5 + } + } + ] + } +] +``` +
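Beyond the CLI `-d` flag shown above, the same diagnostics are reachable through the `ComplexityStatsWithDiagnostic` API added further down in this patch. A minimal sketch, assuming the package is imported at its vendored path and that a sample `prime.go` file (hypothetical, matching the JSON example) is parsed:

```go
// Hypothetical consumer of gocognit diagnostics via the new API.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/uudashr/gocognit"
)

func main() {
	fset := token.NewFileSet()
	// ParseComments is needed so //gocognit:ignore directives are seen.
	f, err := parser.ParseFile(fset, "prime.go", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	stats := gocognit.ComplexityStatsWithDiagnostic(f, fset, nil, true)
	for _, s := range stats {
		fmt.Printf("%s.%s complexity=%d\n", s.PkgName, s.FuncName, s.Complexity)
		for _, d := range s.Diagnostics {
			// Diagnostic.String() renders "+N" or "+N (nesting=M)".
			fmt.Printf("  %s %s at line %d\n", d.String(), d.Text, d.Pos.Line)
		}
	}
}
```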
+ ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. diff --git a/vendor/github.com/uudashr/gocognit/doc.go b/vendor/github.com/uudashr/gocognit/doc.go index ae3d0a226..797b19228 100644 --- a/vendor/github.com/uudashr/gocognit/doc.go +++ b/vendor/github.com/uudashr/gocognit/doc.go @@ -1,2 +1,3 @@ -// Package gocognit defines Analyzer other utilities to checks and calculate the complexity of function based on "cognitive complexity" methods. +// Package gocognit defines Analyzer other utilities to checks and calculate +// the complexity of function based on "cognitive complexity" methods. package gocognit diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go b/vendor/github.com/uudashr/gocognit/gocognit.go index 2bba2eb4f..e51ee2a04 100644 --- a/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -4,6 +4,7 @@ import ( "fmt" "go/ast" "go/token" + "strconv" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" @@ -12,10 +13,58 @@ import ( // Stat is statistic of the complexity. type Stat struct { - PkgName string - FuncName string - Complexity int - Pos token.Position + PkgName string + FuncName string + Complexity int + Pos token.Position + Diagnostics []Diagnostic `json:",omitempty"` +} + +// Diagnostic contains information how the complexity increase. +type Diagnostic struct { + Inc int + Nesting int `json:",omitempty"` + Text string + Pos DiagnosticPosition +} + +// DiagnosticPosition is the position of the diagnostic. +type DiagnosticPosition struct { + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (byte count) +} + +func (pos DiagnosticPosition) isValid() bool { + return pos.Line > 0 +} + +func (pos DiagnosticPosition) String() string { + var s string + if pos.isValid() { + if s != "" { + s += ":" + } + + s += strconv.Itoa(pos.Line) + if pos.Column != 0 { + s += fmt.Sprintf(":%d", pos.Column) + } + } + + if s == "" { + s = "-" + } + + return s +} + +func (d Diagnostic) String() string { + if d.Nesting == 0 { + return fmt.Sprintf("+%d", d.Inc) + } + + return fmt.Sprintf("+%d (nesting=%d)", d.Inc, d.Nesting) } func (s Stat) String() string { @@ -24,6 +73,11 @@ func (s Stat) String() string { // ComplexityStats builds the complexity statistics. func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { + return ComplexityStatsWithDiagnostic(f, fset, stats, false) +} + +// ComplexityStatsWithDiagnostic builds the complexity statistics with diagnostic. 
+func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Stat, enableDiagnostics bool) []Stat { for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { d := parseDirective(fn.Doc) @@ -31,17 +85,43 @@ func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { continue } + res := ScanComplexity(fn, enableDiagnostics) + stats = append(stats, Stat{ - PkgName: f.Name.Name, - FuncName: funcName(fn), - Complexity: Complexity(fn), - Pos: fset.Position(fn.Pos()), + PkgName: f.Name.Name, + FuncName: funcName(fn), + Complexity: res.Complexity, + Diagnostics: generateDiagnostics(fset, res.Diagnostics), + Pos: fset.Position(fn.Pos()), }) } } + return stats } +func generateDiagnostics(fset *token.FileSet, diags []diagnostic) []Diagnostic { + out := make([]Diagnostic, 0, len(diags)) + + for _, diag := range diags { + pos := fset.Position(diag.Pos) + diagPos := DiagnosticPosition{ + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + out = append(out, Diagnostic{ + Inc: diag.Inc, + Nesting: diag.Nesting, + Text: diag.Text, + Pos: diagPos, + }) + } + + return out +} + type directive struct { Ignore bool } @@ -66,20 +146,46 @@ func funcName(fn *ast.FuncDecl) string { if fn.Recv != nil { if fn.Recv.NumFields() > 0 { typ := fn.Recv.List[0].Type + return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) } } + return fn.Name.Name } // Complexity calculates the cognitive complexity of a function. func Complexity(fn *ast.FuncDecl) int { + res := ScanComplexity(fn, false) + + return res.Complexity +} + +// ScanComplexity scans the function declaration. +func ScanComplexity(fn *ast.FuncDecl, includeDiagnostics bool) ScanResult { v := complexityVisitor{ - name: fn.Name, + name: fn.Name, + diagnosticsEnabled: includeDiagnostics, } ast.Walk(&v, fn) - return v.complexity + + return ScanResult{ + Diagnostics: v.diagnostics, + Complexity: v.complexity, + } +} + +type ScanResult struct { + Diagnostics []diagnostic + Complexity int +} + +type diagnostic struct { + Inc int + Nesting int + Text string + Pos token.Pos } type complexityVisitor struct { @@ -88,6 +194,9 @@ type complexityVisitor struct { nesting int elseNodes map[ast.Node]bool calculatedExprs map[ast.Expr]bool + + diagnosticsEnabled bool + diagnostics []diagnostic } func (v *complexityVisitor) incNesting() { @@ -98,12 +207,33 @@ func (v *complexityVisitor) decNesting() { v.nesting-- } -func (v *complexityVisitor) incComplexity() { +func (v *complexityVisitor) incComplexity(text string, pos token.Pos) { v.complexity++ + + if !v.diagnosticsEnabled { + return + } + + v.diagnostics = append(v.diagnostics, diagnostic{ + Inc: 1, + Text: text, + Pos: pos, + }) } -func (v *complexityVisitor) nestIncComplexity() { +func (v *complexityVisitor) nestIncComplexity(text string, pos token.Pos) { v.complexity += (v.nesting + 1) + + if !v.diagnosticsEnabled { + return + } + + v.diagnostics = append(v.diagnostics, diagnostic{ + Inc: v.nesting + 1, + Nesting: v.nesting, + Text: text, + Pos: pos, + }) } func (v *complexityVisitor) markAsElseNode(n ast.Node) { @@ -162,11 +292,12 @@ func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { case *ast.CallExpr: return v.visitCallExpr(n) } + return v } func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { - v.incIfComplexity(n) + v.incIfComplexity(n, "if", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -174,17 +305,12 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { ast.Walk(v, n.Cond) - pure := !v.markedAsElseNode(n) // 
pure `if` statement, not an `else if` - if pure { - v.incNesting() - ast.Walk(v, n.Body) - v.decNesting() - } else { - ast.Walk(v, n.Body) - } + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() if _, ok := n.Else.(*ast.BlockStmt); ok { - v.incComplexity() + v.incComplexity("else", n.Else.Pos()) ast.Walk(v, n.Else) } else if _, ok := n.Else.(*ast.IfStmt); ok { @@ -196,7 +322,7 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { } func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -209,11 +335,12 @@ func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -226,20 +353,22 @@ func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visit v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitSelectStmt(n *ast.SelectStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("select", n.Pos()) v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("for", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -256,11 +385,12 @@ func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("for", n.Pos()) if n := n.Key; n != nil { ast.Walk(v, n) @@ -275,6 +405,7 @@ func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } @@ -284,13 +415,15 @@ func (v *complexityVisitor) visitFuncLit(n *ast.FuncLit) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { if n.Label != nil { - v.incComplexity() + v.incComplexity(n.Tok.String(), n.Pos()) } + return v } @@ -301,11 +434,12 @@ func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { var lastOp token.Token for _, op := range ops { if lastOp != op { - v.incComplexity() + v.incComplexity(op.String(), n.OpPos) lastOp = op } } } + return v } @@ -314,34 +448,38 @@ func (v *complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { obj, name := callIdent.Obj, callIdent.Name if obj == v.name.Obj && name == v.name.Name { // called by same function directly (direct recursion) - v.incComplexity() + v.incComplexity(name, n.Pos()) } } + return v } func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) + if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) } return nil } -func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { +func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt, text string, pos token.Pos) { if v.markedAsElseNode(n) { - v.incComplexity() + v.incComplexity(text, pos) } else { - v.nestIncComplexity() + v.nestIncComplexity(text, pos) } } func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) 
[]token.Token { var out []token.Token out = append(out, x...) + if isBinaryLogicalOp(op) { out = append(out, op) } + out = append(out, y...) return out } diff --git a/vendor/github.com/uudashr/gocognit/recv.go b/vendor/github.com/uudashr/gocognit/recv.go index 2f20d843a..eaf3c9762 100644 --- a/vendor/github.com/uudashr/gocognit/recv.go +++ b/vendor/github.com/uudashr/gocognit/recv.go @@ -20,5 +20,6 @@ func recvString(recv ast.Expr) string { case *ast.IndexListExpr: return recvString(t.X) } + return "BADRECV" } diff --git a/vendor/github.com/uudashr/gocognit/recv_pre118.go b/vendor/github.com/uudashr/gocognit/recv_pre118.go index 9e0ebfd82..a47ba1bd5 100644 --- a/vendor/github.com/uudashr/gocognit/recv_pre118.go +++ b/vendor/github.com/uudashr/gocognit/recv_pre118.go @@ -16,5 +16,6 @@ func recvString(recv ast.Expr) string { case *ast.StarExpr: return "*" + recvString(t.X) } + return "BADRECV" } diff --git a/vendor/github.com/uudashr/iface/internal/directive/directive.go b/vendor/github.com/uudashr/iface/internal/directive/directive.go index 05c62928e..f7c3ee349 100644 --- a/vendor/github.com/uudashr/iface/internal/directive/directive.go +++ b/vendor/github.com/uudashr/iface/internal/directive/directive.go @@ -6,7 +6,7 @@ import ( "strings" ) -// Ignore represent a special instruction embebded in the source code. +// Ignore represent a special instruction embedded in the source code. // // The directive can be as simple as // diff --git a/vendor/github.com/uudashr/iface/opaque/opaque.go b/vendor/github.com/uudashr/iface/opaque/opaque.go index f8b7bf4c6..500249d77 100644 --- a/vendor/github.com/uudashr/iface/opaque/opaque.go +++ b/vendor/github.com/uudashr/iface/opaque/opaque.go @@ -157,12 +157,15 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { } typ := pass.TypesInfo.TypeOf(res) + isNilStmt := isUntypedNil(typ) if r.debug { - fmt.Printf(" Ident type: %v %v interface=%t\n", typ, reflect.TypeOf(typ), types.IsInterface(typ)) + fmt.Printf(" Ident type: %v %v interface=%t, untypedNil=%t\n", typ, reflect.TypeOf(typ), types.IsInterface(typ), isNilStmt) } - retStmtTypes[i][typ] = struct{}{} + if !isNilStmt { + retStmtTypes[i][typ] = struct{}{} + } case *ast.UnaryExpr: if r.debug { fmt.Printf(" UnaryExpr X: %v \n", res.X) @@ -274,18 +277,42 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { stmtTypName = removePkgPrefix(stmtTypName) } - pass.Reportf(result.Pos(), - "%s function return %s interface at the %s result, abstract a single concrete implementation of %s", + msg := fmt.Sprintf("%s function return %s interface at the %s result, abstract a single concrete implementation of %s", funcDecl.Name.Name, retTypeName, positionStr(currentIdx), stmtTypName) + + pass.Report(analysis.Diagnostic{ + Pos: result.Pos(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Replace the interface return type with the concrete type", + TextEdits: []analysis.TextEdit{ + { + Pos: result.Pos(), + End: result.End(), + NewText: []byte(stmtTypName), + }, + }, + }, + }, + }) } }) return nil, nil } +func isUntypedNil(typ types.Type) bool { + if b, ok := typ.(*types.Basic); ok { + return b.Kind() == types.UntypedNil + } + + return false +} + func positionStr(idx int) string { switch idx { case 0: diff --git a/vendor/github.com/uudashr/iface/unused/unused.go b/vendor/github.com/uudashr/iface/unused/unused.go index c2efbf52c..9c301ae67 100644 --- a/vendor/github.com/uudashr/iface/unused/unused.go +++ b/vendor/github.com/uudashr/iface/unused/unused.go @@ -48,7 +48,8 
@@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Collect all interface type declarations - ifaceDecls := make(map[string]token.Pos) + ifaceDecls := make(map[string]*ast.TypeSpec) + genDecls := make(map[string]*ast.GenDecl) // ifaceName -> GenDecl nodeFilter := []ast.Node{ (*ast.GenDecl)(nil), @@ -80,7 +81,7 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { _, ok = ts.Type.(*ast.InterfaceType) if !ok { - return + continue } if r.debug { @@ -93,7 +94,8 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { continue } - ifaceDecls[ts.Name.Name] = ts.Pos() + ifaceDecls[ts.Name.Name] = ts + genDecls[ts.Name.Name] = decl } }) @@ -117,21 +119,51 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { return } - pos := ifaceDecls[ident.Name] - if pos == ident.Pos() { + ts, ok := ifaceDecls[ident.Name] + if !ok { + return + } + + if ts.Pos() == ident.Pos() { // The identifier is the interface type declaration return } delete(ifaceDecls, ident.Name) + delete(genDecls, ident.Name) }) if r.debug { fmt.Printf("Package %s %s\n", pass.Pkg.Path(), pass.Pkg.Name()) } - for name, pos := range ifaceDecls { - pass.Reportf(pos, "interface %s is declared but not used within the package", name) + for name, ts := range ifaceDecls { + decl := genDecls[name] + + var node ast.Node + if len(decl.Specs) == 1 { + node = decl + } else { + node = ts + } + + msg := fmt.Sprintf("interface %s is declared but not used within the package", name) + pass.Report(analysis.Diagnostic{ + Pos: ts.Pos(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Remove the unused interface declaration", + TextEdits: []analysis.TextEdit{ + { + Pos: node.Pos(), + End: node.End(), + NewText: []byte{}, + }, + }, + }, + }, + }) } return nil, nil diff --git a/vendor/go-simpler.org/sloglint/sloglint.go b/vendor/go-simpler.org/sloglint/sloglint.go index bff37b723..97199b417 100644 --- a/vendor/go-simpler.org/sloglint/sloglint.go +++ b/vendor/go-simpler.org/sloglint/sloglint.go @@ -265,7 +265,18 @@ func visit(pass *analysis.Pass, opts *Options, node ast.Node, stack []ast.Node) if opts.NoRawKeys { forEachKey(pass.TypesInfo, keys, attrs, func(key ast.Expr) { - if ident, ok := key.(*ast.Ident); !ok || ident.Obj == nil || ident.Obj.Kind != ast.Con { + if selector, ok := key.(*ast.SelectorExpr); ok { + key = selector.Sel // the key is defined in another package, e.g. pkg.ConstKey. + } + isConst := false + if ident, ok := key.(*ast.Ident); ok { + if obj := pass.TypesInfo.ObjectOf(ident); obj != nil { + if _, ok := obj.(*types.Const); ok { + isConst = true + } + } + } + if !isConst { pass.Reportf(call.Pos(), "raw keys should not be used") } }) diff --git a/vendor/go.yaml.in/yaml/v3/LICENSE b/vendor/go.yaml.in/yaml/v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/vendor/go.yaml.in/yaml/v3/NOTICE similarity index 58% rename from vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go rename to vendor/go.yaml.in/yaml/v3/NOTICE index fc7316521..866d74a7a 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go +++ b/vendor/go.yaml.in/yaml/v3/NOTICE @@ -1,5 +1,4 @@ -/* -Copyright The Kubernetes Authors. +Copyright 2011-2016 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,16 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// IPAddressListerExpansion allows custom methods to be added to -// IPAddressLister. -type IPAddressListerExpansion interface{} - -// ServiceCIDRListerExpansion allows custom methods to be added to -// ServiceCIDRLister. 
-type ServiceCIDRListerExpansion interface{} diff --git a/vendor/go.yaml.in/yaml/v3/README.md b/vendor/go.yaml.in/yaml/v3/README.md new file mode 100644 index 000000000..15a85a635 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/README.md @@ -0,0 +1,171 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). + +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v3*. + +To install it, run: + +```bash +go get go.yaml.in/yaml/v3 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v3/apic.go b/vendor/go.yaml.in/yaml/v3/apic.go new file mode 100644 index 000000000..05fd305da --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. 
+func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. 
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. 
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v3/decode.go b/vendor/go.yaml.in/yaml/v3/decode.go new file mode 100644 index 000000000..02e2b17bf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/decode.go @@ -0,0 +1,1018 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" { + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[interface{}]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := 
settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[interface{}]bool, key interface{}, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[interface{}]bool, key interface{}) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v3/emitterc.go b/vendor/go.yaml.in/yaml/v3/emitterc.go new file mode 100644 index 000000000..ab4e03ba7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/emitterc.go @@ -0,0 +1,2054 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
+func yaml_emitter_increase_indent_compact(emitter *yaml_emitter_t, flow, indentless bool, compact_seq bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: 
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// yaml_emitter_increase_indent preserves the original signature and delegates to +// yaml_emitter_increase_indent_compact without compact-sequence indentation +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + return yaml_emitter_increase_indent_compact(emitter, flow, indentless, false) +} + +// yaml_emitter_process_line_comment preserves the original signature and delegates to +// yaml_emitter_process_line_comment_linebreak passing false for linebreak +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + return yaml_emitter_process_line_comment_linebreak(emitter, false) +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		// emitter.mapping_context tells us if we are currently in a mapping context.
+		// emitter.column tells us which column of the yaml output we are in. 0 is the first char of the line.
+		// emitter.indention tells us if the last character was an indentation character.
+		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
+		// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
+		// the last character was not an indentation character, and we consider '- ' part of the indentation
+		// for sequence elements.
+		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
+			emitter.compact_sequence_indent
+		if !yaml_emitter_increase_indent_compact(emitter, false, false, seq) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] Line comments are generally associated with the value, but when there's + // no value on the same line as a mapping key they end up attached to the + // key itself. + if event.typ == yaml_SCALAR_EVENT { + if len(emitter.line_comment) == 0 { + // A scalar is coming and it has no line comments by itself yet, + // so just let it handle the line comment as usual. If it has a + // line comment, we can't have both so the one from the key is lost. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } + } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { + // An indented block follows, so write the comment right now. + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. 
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
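The simple-key check that follows caps plain mapping keys at 128 bytes; longer or multiline keys fall back to the explicit '?' key indicator. A small sketch of the observable effect (the expected output is described in the comment, not asserted), assuming the vendored import path go.yaml.in/yaml/v3:

package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	longKey := strings.Repeat("k", 150) // over the 128-byte simple-key limit
	out, err := yaml.Marshal(map[string]string{longKey: "value"})
	if err != nil {
		panic(err)
	}
	// Expected to use the explicit key form rather than "key: value":
	//   ? kkk...k
	//   : value
	fmt.Print(string(out))
}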
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment_linebreak(emitter *yaml_emitter_t, linebreak bool) bool { + if len(emitter.line_comment) == 0 { + // The next 3 lines are needed to resolve an issue with leading newlines + // See https://github.com/go-yaml/yaml/issues/755 + // When linebreak is set to true, put_break will be called and will add + // the needed newline. + if linebreak && !put_break(emitter) { + return false + } + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. 
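yaml_emitter_process_scalar above dispatches on the scalar style selected earlier; through the public API a style can be requested per node. A minimal sketch, with the rendered forms in the comments given as indications only:

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	nodes := []*yaml.Node{
		{Kind: yaml.ScalarNode, Style: yaml.SingleQuotedStyle, Value: "hello"},        // expected: 'hello'
		{Kind: yaml.ScalarNode, Style: yaml.DoubleQuotedStyle, Value: "hello"},        // expected: "hello"
		{Kind: yaml.ScalarNode, Style: yaml.LiteralStyle, Value: "line1\nline2\n"},    // expected: | block scalar
		{Kind: yaml.ScalarNode, Style: yaml.FoldedStyle, Value: "long folded text\n"}, // expected: > block scalar
	}
	for _, n := range nodes {
		out, err := yaml.Marshal(n)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}
}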
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
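yaml_emitter_analyze_scalar above is what ultimately decides whether a string may stay plain or must be quoted or written as a block scalar. A quick way to see its effect from the public API; the exact quoting is chosen by the emitter, so the comments only describe the expected tendency.

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	samples := []string{
		"plain text",  // expected to stay plain
		" padded ",    // leading/trailing space: expected to be quoted
		"yes",         // YAML 1.1 boolean lookalike: expected to be quoted
		"3.14",        // would resolve to a number: expected to be quoted
		"multi\nline", // line break: expected to become a literal block scalar
	}
	out, err := yaml.Marshal(samples)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}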
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if 
!yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !yaml_emitter_process_line_comment_linebreak(emitter, true) { + return false + } + + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/encode.go b/vendor/go.yaml.in/yaml/v3/encode.go new file mode 100644 index 000000000..de9e72a3e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/encode.go @@ -0,0 +1,577 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
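For reference while reading the encoder in encode.go below: a hedged sketch of how it is normally driven from application code through Encoder and SetIndent (the encoder defaults to an indent of 4, as set in its init), assuming the vendored import path go.yaml.in/yaml/v3.

package main

import (
	"bytes"
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2) // override the default indent of 4 used by the encoder below
	if err := enc.Encode(map[string]map[string]int{"spec": {"replicas": 3}}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Expected output:
	// spec:
	//   replicas: 3
}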
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + var n = reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + 
sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
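isBase60Float above and isOldBool below exist purely so that strings a YAML 1.1 parser would reinterpret come out quoted. A minimal sketch of the observable behaviour; the quoting style itself is left to the emitter and is not asserted here.

package main

import (
	"fmt"

	yaml "go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal([]string{
		"y",       // YAML 1.1 boolean: expected to be quoted
		"on",      // YAML 1.1 boolean: expected to be quoted
		"1:30",    // YAML 1.1 base-60 float: expected to be quoted
		"one:30s", // neither: expected to stay plain
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}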
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v3/parserc.go b/vendor/go.yaml.in/yaml/v3/parserc.go new file mode 100644 index 000000000..25fe82363 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/parserc.go @@ -0,0 +1,1274 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
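The state dispatcher below drives the event stream that eventually becomes the exported Node tree. A small sketch that makes the resulting structure visible through the public API, assuming the vendored import path go.yaml.in/yaml/v3:

package main

import (
	"fmt"
	"strings"

	yaml "go.yaml.in/yaml/v3"
)

func kindName(k yaml.Kind) string {
	switch k {
	case yaml.DocumentNode:
		return "document"
	case yaml.MappingNode:
		return "mapping"
	case yaml.SequenceNode:
		return "sequence"
	case yaml.ScalarNode:
		return "scalar"
	case yaml.AliasNode:
		return "alias"
	}
	return "unknown"
}

func dump(n *yaml.Node, depth int) {
	fmt.Printf("%s%s %q\n", strings.Repeat("  ", depth), kindName(n.Kind), n.Value)
	for _, c := range n.Content {
		dump(c, depth+1)
	}
}

func main() {
	src := "name: demo\nitems:\n- a\n- b\n"
	var root yaml.Node
	if err := yaml.Unmarshal([]byte(src), &root); err != nil {
		panic(err)
	}
	// document -> mapping -> scalars and a sequence, mirroring the
	// DOCUMENT-START / MAPPING-START / SEQUENCE-START events produced here.
	dump(&root, 0)
}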
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// * +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. 
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
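+//
+// stem_len is the length the head comment had before the '-' indicator of the
+// current entry was consumed; only that prefix is moved aside, and only when
+// the next token starts a nested block collection.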
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + if token == nil { + return false + } + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - *** * +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - ***** * +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
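+//
+// Both %YAML and %TAG directives are consumed here. For example, the header
+//
+// %YAML 1.1
+// %TAG !e! tag:example.com,2000:app/
+//
+// records a 1.1 version directive and one tag directive mapping the handle
+// "!e!" to that prefix; the default "!" and "!!" handles are then registered
+// on the parser unless the document already declared them.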
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/readerc.go b/vendor/go.yaml.in/yaml/v3/readerc.go new file mode 100644 index 000000000..56af24536 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
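+//
+// Whatever the input encoding (UTF-8 or UTF-16), decoded characters are
+// appended to parser.buffer as UTF-8, and parser.unread counts characters,
+// not bytes.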
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
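+ // Fewer raw bytes than the sequence width means the input is either
+ // truncated (an error at EOF) or simply not buffered yet.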
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
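+ // The second unit of a surrogate pair must fall in 0xDC00-0xDFFF.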
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/resolve.go b/vendor/go.yaml.in/yaml/v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
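+ //
+ // The switch below keys on the first-character class recorded in
+ // resolveTable: 'M' for words already matched by resolveMap above, '.' for
+ // candidate floats, and 'D'/'S' for digits or a sign (ints, floats, and
+ // timestamps).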
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/go.yaml.in/yaml/v3/scannerc.go b/vendor/go.yaml.in/yaml/v3/scannerc.go new file mode 100644 index 000000000..30b1f0892 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/scannerc.go @@ -0,0 +1,3040 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
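+	// For illustration only, a minimal caller-side sketch of draining the
+	// scanner with this function, assuming a parser value that has already
+	// been initialized and given an input (only names defined in this file
+	// are used):
+	//
+	//	var tok yaml_token_t
+	//	for {
+	//		if !yaml_parser_scan(&parser, &tok) {
+	//			break // scanner error; see parser.problem and parser.problem_mark
+	//		}
+	//		if tok.typ == yaml_STREAM_END_TOKEN {
+	//			break
+	//		}
+	//		// ... consume tok ...
+	//	}
+	//
+	// The dequeue below advances tokens_head rather than re-slicing
+	// parser.tokens.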
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
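+//
+// For example, while scanning
+//
+//      a simple key: a value
+//
+// the scalar "a simple key" is first recorded here only as a *potential*
+// simple key. It becomes a real KEY token later, in yaml_parser_fetch_value,
+// when the following ':' is reached: the KEY token is then inserted
+// retroactively at the token number saved below. The validity window checked
+// by yaml_simple_key_is_valid above (same line, at most 1024 characters of
+// lookahead) bounds how long such a candidate may stay pending.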
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
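+		// For example, for the single document
+		//
+		//      key: value
+		//
+		// the emitted order between STREAM-START and BLOCK-END ends up as
+		//
+		//      BLOCK-MAPPING-START
+		//      KEY
+		//      SCALAR("key",plain)
+		//      VALUE
+		//      SCALAR("value",plain)
+		//
+		// even though "key" was scanned before either of the first two tokens
+		// existed: the KEY token was inserted just above at the saved token
+		// number, and the roll_indent call below inserts BLOCK-MAPPING-START
+		// at that same position, pushing KEY after it.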
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
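+	//
+	// For example, "%YAML 1.2" produces VERSION-DIRECTIVE(major=1,minor=2),
+	// and "%TAG !e! tag:example.com,2000:app/" produces
+	// TAG-DIRECTIVE(handle="!e!",prefix="tag:example.com,2000:app/").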
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
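+		// For example, a tag written in its verbatim form as
+		//
+		//      !<tag:example.com,2000:caf%C3%A9>
+		//
+		// carries the percent-encoded octets C3 A9; yaml_parser_scan_uri_escapes
+		// below decodes them and appends the raw UTF-8 bytes of 'é', so no
+		// percent-escaping survives in the resulting token value.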
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
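+		// For example, in the single-quoted scalar 'it''s' the doubled quote is
+		// collapsed below into a single ', yielding the value it's; in a
+		// double-quoted scalar, escape sequences such as \n, \t and \u00E9 are
+		// decoded here into their raw bytes (0x0A, 0x09, and the UTF-8 pair
+		// 0xC3 0xA9) rather than being copied verbatim.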
+ leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. 
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
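
Illustrative aside on the quoted scalar scanner above (editorial note, not upstream code): double-quoted scalars decode the escape table handled there into UTF-8, while single-quoted scalars only treat '' as an escaped quote. A small sketch under the same go.yaml.in/yaml/v3 import-path assumption.

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	src := []byte("double: \"tab:\\t smiley:\\u263A\"\nsingle: 'it''s plain otherwise'\n")
	var doc struct {
		Double string
		Single string
	}
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}
	// Expected per YAML escaping rules: the double-quoted value contains a real
	// tab character and the rune U+263A, while the single-quoted value decodes
	// to "it's plain otherwise".
	fmt.Printf("%q\n%q\n", doc.Double, doc.Single)
}
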
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
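
Illustrative aside on the plain scalar scanner above (editorial note): a plain scalar stops at ": " after a key, before a " #" comment, and, inside flow collections, at ',', '?', '[', ']', '{' and '}'. A brief sketch of the visible effect, again assuming the go.yaml.in/yaml/v3 import path.

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	var block struct {
		Msg string
	}
	// The plain scalar for the value runs up to the trailing comment.
	if err := yaml.Unmarshal([]byte("msg: hello plain world # comment\n"), &block); err != nil {
		panic(err)
	}

	var flow []string
	// Inside a flow sequence the ',' terminates each plain scalar; spaces do not.
	if err := yaml.Unmarshal([]byte("[one two, three]\n"), &flow); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", block.Msg, flow)
}
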
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + var next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index + peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/sorter.go b/vendor/go.yaml.in/yaml/v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
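
Editorial aside: the comment scanner above is what feeds the HeadComment, LineComment and FootComment fields of the Node type introduced later in this patch (yaml.go), so comments can survive a decode/encode round trip. A minimal sketch with the usual go.yaml.in/yaml/v3 import-path assumption; exactly which node carries a given comment is an implementation detail worth verifying against the library's tests.

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	src := []byte("# head comment\nkey: value # line comment\n")
	var root yaml.Node
	if err := yaml.Unmarshal(src, &root); err != nil {
		panic(err)
	}
	// root is the document node; Content[0] is the mapping, whose Content
	// slice alternates key and value nodes.
	key, val := root.Content[0].Content[0], root.Content[0].Content[1]
	fmt.Printf("key head=%q key line=%q val line=%q\n",
		key.HeadComment, key.LineComment, val.LineComment)

	out, err := yaml.Marshal(&root)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // comments are re-emitted near the data they describe
}
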
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
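
Editorial aside on the keyList ordering above: when the encoder serializes a Go map it sorts the keys with this comparator, so keys sharing a prefix and differing by an embedded number come out in numeric rather than lexical order. A small sketch (same import-path assumption; the exact output follows this comparator and is worth verifying).

package main

import (
	"fmt"

	"go.yaml.in/yaml/v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2, "other": 1})
	if err != nil {
		panic(err)
	}
	// With plain lexical ordering "item10" would sort before "item2"; the
	// natural ordering implemented above should instead yield:
	//   item2: 2
	//   item10: 10
	//   other: 1
	fmt.Print(string(out))
}
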
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v3/writerc.go b/vendor/go.yaml.in/yaml/v3/writerc.go new file mode 100644 index 000000000..266d0b092 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/go.yaml.in/yaml/v3/yaml.go b/vendor/go.yaml.in/yaml/v3/yaml.go new file mode 100644 index 000000000..0b101cd20 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yaml.go @@ -0,0 +1,703 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
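
Editorial aside illustrating the Decoder described above: KnownFields(true) turns unknown mapping keys into decode errors, and Decode returns io.EOF once the stream is exhausted, which is the usual way to consume multi-document input. A sketch assuming the go.yaml.in/yaml/v3 import path; the Item type is illustrative only.

package main

import (
	"fmt"
	"io"
	"strings"

	"go.yaml.in/yaml/v3"
)

type Item struct {
	Name  string `yaml:"name"`
	Count int    `yaml:"count"`
}

func main() {
	stream := "name: a\ncount: 1\n---\nname: b\ncount: 2\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	dec.KnownFields(true) // unknown keys become errors instead of being dropped

	for {
		var it Item
		if err := dec.Decode(&it); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", it)
	}
}
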
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. 
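
Editorial aside on the Marshal field tags documented above and the Encoder API that follows: successive Encode calls emit documents separated by "---", SetIndent controls indentation, and Close flushes the stream. A sketch under the same go.yaml.in/yaml/v3 import-path assumption; the Config type and its tags are illustrative only.

package main

import (
	"os"

	"go.yaml.in/yaml/v3"
)

type Config struct {
	Name   string            `yaml:"name"`
	Debug  bool              `yaml:"debug,omitempty"` // omitted while false
	Ports  []int             `yaml:"ports,flow"`      // rendered in flow style, e.g. [8080, 9090]
	Labels map[string]string `yaml:",inline"`         // keys merged into the parent mapping
}

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2)
	defer enc.Close()

	cfgs := []Config{
		{Name: "a", Ports: []int{8080, 9090}, Labels: map[string]string{"team": "infra"}},
		{Name: "b", Ports: []int{80}},
	}
	for _, c := range cfgs {
		// The second and later documents are preceded by a "---" separator.
		if err := enc.Encode(c); err != nil {
			panic(err)
		}
	}
}
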
+func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.compact_sequence_indent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.compact_sequence_indent = false +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. 
+// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. 
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + 
inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlh.go b/vendor/go.yaml.in/yaml/v3/yamlh.go new file mode 100644 index 000000000..f59aa40f6 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlh.go @@ -0,0 +1,811 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. 
+ yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. 
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yaml_parser_set_input(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. 
+} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yaml_emitter_set_output(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis.
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/go.yaml.in/yaml/v3/yamlprivateh.go b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go new file mode 100644 index 000000000..dea1ba961 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. 
+func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 9d260bab1..000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -import "cmp" - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -// -// This type is redundant since Go 1.21 introduced [cmp.Ordered]. 
-// -//go:fix inline -type Ordered = cmp.Ordered diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go deleted file mode 100644 index 4a9747ef4..000000000 --- a/vendor/golang.org/x/exp/maps/maps.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package maps defines various functions useful with maps of any type. -package maps - -import "maps" - -// Keys returns the keys of the map m. -// The keys will be in an indeterminate order. -// -// The simplest true equivalent using the standard library is: -// -// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)) -func Keys[M ~map[K]V, K comparable, V any](m M) []K { - - r := make([]K, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} - -// Values returns the values of the map m. -// The values will be in an indeterminate order. -// -// The simplest true equivalent using the standard library is: -// -// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)) -func Values[M ~map[K]V, K comparable, V any](m M) []V { - - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} - -// Equal reports whether two maps contain the same key/value pairs. -// Values are compared using ==. -// -//go:fix inline -func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - return maps.Equal(m1, m2) -} - -// EqualFunc is like Equal, but compares values using eq. -// Keys are still compared with ==. -// -//go:fix inline -func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - return maps.EqualFunc(m1, m2, eq) -} - -// Clear removes all entries from m, leaving it empty. -// -//go:fix inline -func Clear[M ~map[K]V, K comparable, V any](m M) { - clear(m) -} - -// Clone returns a copy of m. This is a shallow clone: -// the new keys and values are set using ordinary assignment. -// -//go:fix inline -func Clone[M ~map[K]V, K comparable, V any](m M) M { - return maps.Clone(m) -} - -// Copy copies all key/value pairs in src adding them to dst. -// When a key in src is already present in dst, -// the value in dst will be overwritten by the value associated -// with the key in src. -// -//go:fix inline -func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - maps.Copy(dst, src) -} - -// DeleteFunc deletes any key/value pairs from m for which del returns true. -// -//go:fix inline -func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - maps.DeleteFunc(m, del) -} diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go deleted file mode 100644 index a180d0e1d..000000000 --- a/vendor/golang.org/x/exp/slog/attr.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "fmt" - "time" -) - -// An Attr is a key-value pair. -type Attr struct { - Key string - Value Value -} - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return Attr{key, StringValue(value)} -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return Attr{key, Int64Value(value)} -} - -// Int converts an int to an int64 and returns -// an Attr with that value. 
-func Int(key string, value int) Attr { - return Int64(key, int64(value)) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return Attr{key, Uint64Value(v)} -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return Attr{key, Float64Value(v)} -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return Attr{key, BoolValue(v)} -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return Attr{key, TimeValue(v)} -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return Attr{key, DurationValue(v)} -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return Attr{key, GroupValue(argsToAttrSlice(args)...)} -} - -func argsToAttrSlice(args []any) []Attr { - var ( - attr Attr - attrs []Attr - ) - for len(args) > 0 { - attr, args = argsToAttr(args) - attrs = append(attrs, attr) - } - return attrs -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. -func Any(key string, value any) Attr { - return Attr{key, AnyValue(value)} -} - -// Equal reports whether a and b have equal keys and values. -func (a Attr) Equal(b Attr) bool { - return a.Key == b.Key && a.Value.Equal(b.Value) -} - -func (a Attr) String() string { - return fmt.Sprintf("%s=%s", a.Key, a.Value) -} - -// isEmpty reports whether a has an empty key and a nil value. -// That can be written as Attr{} or Any("", nil). -func (a Attr) isEmpty() bool { - return a.Key == "" && a.Value.num == 0 && a.Value.any == nil -} diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go deleted file mode 100644 index 4beaf8674..000000000 --- a/vendor/golang.org/x/exp/slog/doc.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package slog provides structured logging, -in which log records include a message, -a severity level, and various other attributes -expressed as key-value pairs. - -It defines a type, [Logger], -which provides several methods (such as [Logger.Info] and [Logger.Error]) -for reporting events of interest. - -Each Logger is associated with a [Handler]. -A Logger output method creates a [Record] from the method arguments -and passes it to the Handler, which decides how to handle it. -There is a default Logger accessible through top-level functions -(such as [Info] and [Error]) that call the corresponding Logger methods. - -A log record consists of a time, a level, a message, and a set of key-value -pairs, where the keys are strings and the values may be of any type. -As an example, - - slog.Info("hello", "count", 3) - -creates a record containing the time of the call, -a level of Info, the message "hello", and a single -pair with key "count" and value 3. - -The [Info] top-level function calls the [Logger.Info] method on the default Logger. -In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. 
-Besides these convenience methods for common levels, -there is also a [Logger.Log] method which takes the level as an argument. -Each of these methods has a corresponding top-level function that uses the -default logger. - -The default handler formats the log record's message, time, level, and attributes -as a string and passes it to the [log] package. - - 2022/11/08 15:28:26 INFO hello count=3 - -For more control over the output format, create a logger with a different handler. -This statement uses [New] to create a new logger with a TextHandler -that writes structured records in text form to standard error: - - logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) - -[TextHandler] output is a sequence of key=value pairs, easily and unambiguously -parsed by machine. This statement: - - logger.Info("hello", "count", 3) - -produces this output: - - time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 - -The package also provides [JSONHandler], whose output is line-delimited JSON: - - logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - logger.Info("hello", "count", 3) - -produces this output: - - {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} - -Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. -There are options for setting the minimum level (see Levels, below), -displaying the source file and line of the log call, and -modifying attributes before they are logged. - -Setting a logger as the default with - - slog.SetDefault(logger) - -will cause the top-level functions like [Info] to use it. -[SetDefault] also updates the default logger used by the [log] package, -so that existing applications that use [log.Printf] and related functions -will send log records to the logger's handler without needing to be rewritten. - -Some attributes are common to many log calls. -For example, you may wish to include the URL or trace identifier of a server request -with all log events arising from the request. -Rather than repeat the attribute with every log call, you can use [Logger.With] -to construct a new Logger containing the attributes: - - logger2 := logger.With("url", r.URL) - -The arguments to With are the same key-value pairs used in [Logger.Info]. -The result is a new Logger with the same handler as the original, but additional -attributes that will appear in the output of every call. - -# Levels - -A [Level] is an integer representing the importance or severity of a log event. -The higher the level, the more severe the event. -This package defines constants for the most common levels, -but any int can be used as a level. - -In an application, you may wish to log messages only at a certain level or greater. -One common configuration is to log messages at Info or higher levels, -suppressing debug logging until it is needed. -The built-in handlers can be configured with the minimum level to output by -setting [HandlerOptions.Level]. -The program's `main` function typically does this. -The default value is LevelInfo. - -Setting the [HandlerOptions.Level] field to a [Level] value -fixes the handler's minimum level throughout its lifetime. -Setting it to a [LevelVar] allows the level to be varied dynamically. -A LevelVar holds a Level and is safe to read or write from multiple -goroutines. 
-To vary the level dynamically for an entire program, first initialize -a global LevelVar: - - var programLevel = new(slog.LevelVar) // Info by default - -Then use the LevelVar to construct a handler, and make it the default: - - h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) - slog.SetDefault(slog.New(h)) - -Now the program can change its logging level with a single statement: - - programLevel.Set(slog.LevelDebug) - -# Groups - -Attributes can be collected into groups. -A group has a name that is used to qualify the names of its attributes. -How this qualification is displayed depends on the handler. -[TextHandler] separates the group and attribute names with a dot. -[JSONHandler] treats each group as a separate JSON object, with the group name as the key. - -Use [Group] to create a Group attribute from a name and a list of key-value pairs: - - slog.Group("request", - "method", r.Method, - "url", r.URL) - -TextHandler would display this group as - - request.method=GET request.url=http://example.com - -JSONHandler would display it as - - "request":{"method":"GET","url":"http://example.com"} - -Use [Logger.WithGroup] to qualify all of a Logger's output -with a group name. Calling WithGroup on a Logger results in a -new Logger with the same Handler as the original, but with all -its attributes qualified by the group name. - -This can help prevent duplicate attribute keys in large systems, -where subsystems might use the same keys. -Pass each subsystem a different Logger with its own group name so that -potential duplicates are qualified: - - logger := slog.Default().With("id", systemID) - parserLogger := logger.WithGroup("parser") - parseInput(input, parserLogger) - -When parseInput logs with parserLogger, its keys will be qualified with "parser", -so even if it uses the common key "id", the log line will have distinct keys. - -# Contexts - -Some handlers may wish to include information from the [context.Context] that is -available at the call site. One example of such information -is the identifier for the current span when tracing is enabled. - -The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first -argument, as do their corresponding top-level functions. - -Although the convenience methods on Logger (Info and so on) and the -corresponding top-level functions do not take a context, the alternatives ending -in "Context" do. For example, - - slog.InfoContext(ctx, "message") - -It is recommended to pass a context to an output method if one is available. - -# Attrs and Values - -An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as -alternating keys and values. The statement - - slog.Info("hello", slog.Int("count", 3)) - -behaves the same as - - slog.Info("hello", "count", 3) - -There are convenience constructors for [Attr] such as [Int], [String], and [Bool] -for common types, as well as the function [Any] for constructing Attrs of any -type. - -The value part of an Attr is a type called [Value]. -Like an [any], a Value can hold any Go value, -but it can represent typical values, including all numbers and strings, -without an allocation. - -For the most efficient log output, use [Logger.LogAttrs]. -It is similar to [Logger.Log] but accepts only Attrs, not alternating -keys and values; this allows it, too, to avoid allocation. 
- -The call - - logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3)) - -is the most efficient way to achieve the same output as - - slog.Info("hello", "count", 3) - -# Customizing a type's logging behavior - -If a type implements the [LogValuer] interface, the [Value] returned from its LogValue -method is used for logging. You can use this to control how values of the type -appear in logs. For example, you can redact secret information like passwords, -or gather a struct's fields in a Group. See the examples under [LogValuer] for -details. - -A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] -method handles these cases carefully, avoiding infinite loops and unbounded recursion. -Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly. - -# Wrapping output methods - -The logger functions use reflection over the call stack to find the file name -and line number of the logging call within the application. This can produce -incorrect source information for functions that wrap slog. For instance, if you -define this function in file mylog.go: - - func Infof(format string, args ...any) { - slog.Default().Info(fmt.Sprintf(format, args...)) - } - -and you call it like this in main.go: - - Infof(slog.Default(), "hello, %s", "world") - -then slog will report the source file as mylog.go, not main.go. - -A correct implementation of Infof will obtain the source location -(pc) and pass it to NewRecord. -The Infof function in the package-level example called "wrapping" -demonstrates how to do this. - -# Working with Records - -Sometimes a Handler will need to modify a Record -before passing it on to another Handler or backend. -A Record contains a mixture of simple public fields (e.g. Time, Level, Message) -and hidden fields that refer to state (such as attributes) indirectly. This -means that modifying a simple copy of a Record (e.g. by calling -[Record.Add] or [Record.AddAttrs] to add attributes) -may have unexpected effects on the original. -Before modifying a Record, use [Clone] to -create a copy that shares no state with the original, -or create a new Record with [NewRecord] -and build up its Attrs by traversing the old ones with [Record.Attrs]. - -# Performance considerations - -If profiling your application demonstrates that logging is taking significant time, -the following suggestions may help. - -If many log lines have a common attribute, use [Logger.With] to create a Logger with -that attribute. The built-in handlers will format that attribute only once, at the -call to [Logger.With]. The [Handler] interface is designed to allow that optimization, -and a well-written Handler should take advantage of it. - -The arguments to a log call are always evaluated, even if the log event is discarded. -If possible, defer computation so that it happens only if the value is actually logged. -For example, consider the call - - slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily - -The URL.String method will be called even if the logger discards Info-level events. -Instead, pass the URL directly: - - slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed - -The built-in [TextHandler] will call its String method, but only -if the log event is enabled. -Avoiding the call to String also preserves the structure of the underlying value. -For example [JSONHandler] emits the components of the parsed URL as a JSON object. 
-If you want to avoid eagerly paying the cost of the String call -without causing the handler to potentially inspect the structure of the value, -wrap the value in a fmt.Stringer implementation that hides its Marshal methods. - -You can also use the [LogValuer] interface to avoid unnecessary work in disabled log -calls. Say you need to log some expensive value: - - slog.Debug("frobbing", "value", computeExpensiveValue(arg)) - -Even if this line is disabled, computeExpensiveValue will be called. -To avoid that, define a type implementing LogValuer: - - type expensive struct { arg int } - - func (e expensive) LogValue() slog.Value { - return slog.AnyValue(computeExpensiveValue(e.arg)) - } - -Then use a value of that type in log calls: - - slog.Debug("frobbing", "value", expensive{arg}) - -Now computeExpensiveValue will only be called when the line is enabled. - -The built-in handlers acquire a lock before calling [io.Writer.Write] -to ensure that each record is written in one piece. User-defined -handlers are responsible for their own locking. -*/ -package slog diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go deleted file mode 100644 index bd635cb81..000000000 --- a/vendor/golang.org/x/exp/slog/handler.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "context" - "fmt" - "io" - "reflect" - "strconv" - "sync" - "time" - - "golang.org/x/exp/slices" - "golang.org/x/exp/slog/internal/buffer" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler interface { - // Enabled reports whether the handler handles records at the given level. - // The handler ignores records whose level is lower. - // It is called early, before any arguments are processed, - // to save effort if the log event should be discarded. - // If called from a Logger method, the first argument is the context - // passed to that method, or context.Background() if nil was passed - // or the method does not take a context. - // The context is passed so Enabled can use its values - // to make a decision. - Enabled(context.Context, Level) bool - - // Handle handles the Record. - // It will only be called when Enabled returns true. - // The Context argument is as for Enabled. - // It is present solely to provide Handlers access to the context's values. - // Canceling the context should not affect record processing. - // (Among other things, log messages may be necessary to debug a - // cancellation-related problem.) - // - // Handle methods that produce output should observe the following rules: - // - If r.Time is the zero time, ignore the time. - // - If r.PC is zero, ignore it. - // - Attr's values should be resolved. - // - If an Attr's key and value are both the zero value, ignore the Attr. - // This can be tested with attr.Equal(Attr{}). 
- // - If a group's key is empty, inline the group's Attrs. - // - If a group has no Attrs (even if it has a non-empty key), - // ignore it. - Handle(context.Context, Record) error - - // WithAttrs returns a new Handler whose attributes consist of - // both the receiver's attributes and the arguments. - // The Handler owns the slice: it may retain, modify or discard it. - WithAttrs(attrs []Attr) Handler - - // WithGroup returns a new Handler with the given group appended to - // the receiver's existing groups. - // The keys of all subsequent attributes, whether added by With or in a - // Record, should be qualified by the sequence of group names. - // - // How this qualification happens is up to the Handler, so long as - // this Handler's attribute keys differ from those of another Handler - // with a different sequence of group names. - // - // A Handler should treat WithGroup as starting a Group of Attrs that ends - // at the end of the log event. That is, - // - // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2)) - // - // should behave like - // - // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) - // - // If the name is empty, WithGroup returns the receiver. - WithGroup(name string) Handler -} - -type defaultHandler struct { - ch *commonHandler - // log.Output, except for testing - output func(calldepth int, message string) error -} - -func newDefaultHandler(output func(int, string) error) *defaultHandler { - return &defaultHandler{ - ch: &commonHandler{json: false}, - output: output, - } -} - -func (*defaultHandler) Enabled(_ context.Context, l Level) bool { - return l >= LevelInfo -} - -// Collect the level, attributes and message in a string and -// write it with the default log.Logger. -// Let the log.Logger handle time and file/line. -func (h *defaultHandler) Handle(ctx context.Context, r Record) error { - buf := buffer.New() - buf.WriteString(r.Level.String()) - buf.WriteByte(' ') - buf.WriteString(r.Message) - state := h.ch.newHandleState(buf, true, " ", nil) - defer state.free() - state.appendNonBuiltIns(r) - - // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output] - return h.output(4, buf.String()) -} - -func (h *defaultHandler) WithAttrs(as []Attr) Handler { - return &defaultHandler{h.ch.withAttrs(as), h.output} -} - -func (h *defaultHandler) WithGroup(name string) Handler { - return &defaultHandler{h.ch.withGroup(name), h.output} -} - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions struct { - // AddSource causes the handler to compute the source code position - // of the log statement and add a SourceKey attribute to the output. - AddSource bool - - // Level reports the minimum record level that will be logged. - // The handler discards records with lower levels. - // If Level is nil, the handler assumes LevelInfo. - // The handler calls Level.Level for each record processed; - // to adjust the minimum level dynamically, use a LevelVar. - Level Leveler - - // ReplaceAttr is called to rewrite each non-group attribute before it is logged. - // The attribute's value has been resolved (see [Value.Resolve]). - // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded. - // - // The built-in attributes with keys "time", "level", "source", and "msg" - // are passed to this function, except that time is omitted - // if zero, and source is omitted if AddSource is false. 
- // - // The first argument is a list of currently open groups that contain the - // Attr. It must not be retained or modified. ReplaceAttr is never called - // for Group attributes, only their contents. For example, the attribute - // list - // - // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3) - // - // results in consecutive calls to ReplaceAttr with the following arguments: - // - // nil, Int("a", 1) - // []string{"g"}, Int("b", 2) - // nil, Int("c", 3) - // - // ReplaceAttr can be used to change the default keys of the built-in - // attributes, convert types (for example, to replace a `time.Time` with the - // integer seconds since the Unix epoch), sanitize personal information, or - // remove attributes from the output. - ReplaceAttr func(groups []string, a Attr) Attr -} - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = "time" - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = "level" - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = "msg" - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = "source" -) - -type commonHandler struct { - json bool // true => output JSON; false => output text - opts HandlerOptions - preformattedAttrs []byte - groupPrefix string // for text: prefix of groups opened in preformatting - groups []string // all groups started from WithGroup - nOpenGroups int // the number of groups opened in preformattedAttrs - mu sync.Mutex - w io.Writer -} - -func (h *commonHandler) clone() *commonHandler { - // We can't use assignment because we can't copy the mutex. - return &commonHandler{ - json: h.json, - opts: h.opts, - preformattedAttrs: slices.Clip(h.preformattedAttrs), - groupPrefix: h.groupPrefix, - groups: slices.Clip(h.groups), - nOpenGroups: h.nOpenGroups, - w: h.w, - } -} - -// enabled reports whether l is greater than or equal to the -// minimum level. -func (h *commonHandler) enabled(l Level) bool { - minLevel := LevelInfo - if h.opts.Level != nil { - minLevel = h.opts.Level.Level() - } - return l >= minLevel -} - -func (h *commonHandler) withAttrs(as []Attr) *commonHandler { - h2 := h.clone() - // Pre-format the attributes as an optimization. - prefix := buffer.New() - defer prefix.Free() - prefix.WriteString(h.groupPrefix) - state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix) - defer state.free() - if len(h2.preformattedAttrs) > 0 { - state.sep = h.attrSep() - } - state.openGroups() - for _, a := range as { - state.appendAttr(a) - } - // Remember the new prefix for later keys. - h2.groupPrefix = state.prefix.String() - // Remember how many opened groups are in preformattedAttrs, - // so we don't open them again when we handle a Record. - h2.nOpenGroups = len(h2.groups) - return h2 -} - -func (h *commonHandler) withGroup(name string) *commonHandler { - if name == "" { - return h - } - h2 := h.clone() - h2.groups = append(h2.groups, name) - return h2 -} - -func (h *commonHandler) handle(r Record) error { - state := h.newHandleState(buffer.New(), true, "", nil) - defer state.free() - if h.json { - state.buf.WriteByte('{') - } - // Built-in attributes. They are not in a group. 
- stateGroups := state.groups - state.groups = nil // So ReplaceAttrs sees no groups instead of the pre groups. - rep := h.opts.ReplaceAttr - // time - if !r.Time.IsZero() { - key := TimeKey - val := r.Time.Round(0) // strip monotonic to match Attr behavior - if rep == nil { - state.appendKey(key) - state.appendTime(val) - } else { - state.appendAttr(Time(key, val)) - } - } - // level - key := LevelKey - val := r.Level - if rep == nil { - state.appendKey(key) - state.appendString(val.String()) - } else { - state.appendAttr(Any(key, val)) - } - // source - if h.opts.AddSource { - state.appendAttr(Any(SourceKey, r.source())) - } - key = MessageKey - msg := r.Message - if rep == nil { - state.appendKey(key) - state.appendString(msg) - } else { - state.appendAttr(String(key, msg)) - } - state.groups = stateGroups // Restore groups passed to ReplaceAttrs. - state.appendNonBuiltIns(r) - state.buf.WriteByte('\n') - - h.mu.Lock() - defer h.mu.Unlock() - _, err := h.w.Write(*state.buf) - return err -} - -func (s *handleState) appendNonBuiltIns(r Record) { - // preformatted Attrs - if len(s.h.preformattedAttrs) > 0 { - s.buf.WriteString(s.sep) - s.buf.Write(s.h.preformattedAttrs) - s.sep = s.h.attrSep() - } - // Attrs in Record -- unlike the built-in ones, they are in groups started - // from WithGroup. - s.prefix = buffer.New() - defer s.prefix.Free() - s.prefix.WriteString(s.h.groupPrefix) - s.openGroups() - r.Attrs(func(a Attr) bool { - s.appendAttr(a) - return true - }) - if s.h.json { - // Close all open groups. - for range s.h.groups { - s.buf.WriteByte('}') - } - // Close the top-level object. - s.buf.WriteByte('}') - } -} - -// attrSep returns the separator between attributes. -func (h *commonHandler) attrSep() string { - if h.json { - return "," - } - return " " -} - -// handleState holds state for a single call to commonHandler.handle. -// The initial value of sep determines whether to emit a separator -// before the next key, after which it stays true. -type handleState struct { - h *commonHandler - buf *buffer.Buffer - freeBuf bool // should buf be freed? - sep string // separator to write before next key - prefix *buffer.Buffer // for text: key prefix - groups *[]string // pool-allocated slice of active groups, for ReplaceAttr -} - -var groupPool = sync.Pool{New: func() any { - s := make([]string, 0, 10) - return &s -}} - -func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState { - s := handleState{ - h: h, - buf: buf, - freeBuf: freeBuf, - sep: sep, - prefix: prefix, - } - if h.opts.ReplaceAttr != nil { - s.groups = groupPool.Get().(*[]string) - *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...) - } - return s -} - -func (s *handleState) free() { - if s.freeBuf { - s.buf.Free() - } - if gs := s.groups; gs != nil { - *gs = (*gs)[:0] - groupPool.Put(gs) - } -} - -func (s *handleState) openGroups() { - for _, n := range s.h.groups[s.h.nOpenGroups:] { - s.openGroup(n) - } -} - -// Separator for group names and keys. -const keyComponentSep = '.' - -// openGroup starts a new group of attributes -// with the given name. -func (s *handleState) openGroup(name string) { - if s.h.json { - s.appendKey(name) - s.buf.WriteByte('{') - s.sep = "" - } else { - s.prefix.WriteString(name) - s.prefix.WriteByte(keyComponentSep) - } - // Collect group names for ReplaceAttr. - if s.groups != nil { - *s.groups = append(*s.groups, name) - } -} - -// closeGroup ends the group with the given name. 
-func (s *handleState) closeGroup(name string) { - if s.h.json { - s.buf.WriteByte('}') - } else { - (*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */] - } - s.sep = s.h.attrSep() - if s.groups != nil { - *s.groups = (*s.groups)[:len(*s.groups)-1] - } -} - -// appendAttr appends the Attr's key and value using app. -// It handles replacement and checking for an empty key. -// after replacement). -func (s *handleState) appendAttr(a Attr) { - if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup { - var gs []string - if s.groups != nil { - gs = *s.groups - } - // Resolve before calling ReplaceAttr, so the user doesn't have to. - a.Value = a.Value.Resolve() - a = rep(gs, a) - } - a.Value = a.Value.Resolve() - // Elide empty Attrs. - if a.isEmpty() { - return - } - // Special case: Source. - if v := a.Value; v.Kind() == KindAny { - if src, ok := v.Any().(*Source); ok { - if s.h.json { - a.Value = src.group() - } else { - a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line)) - } - } - } - if a.Value.Kind() == KindGroup { - attrs := a.Value.Group() - // Output only non-empty groups. - if len(attrs) > 0 { - // Inline a group with an empty key. - if a.Key != "" { - s.openGroup(a.Key) - } - for _, aa := range attrs { - s.appendAttr(aa) - } - if a.Key != "" { - s.closeGroup(a.Key) - } - } - } else { - s.appendKey(a.Key) - s.appendValue(a.Value) - } -} - -func (s *handleState) appendError(err error) { - s.appendString(fmt.Sprintf("!ERROR:%v", err)) -} - -func (s *handleState) appendKey(key string) { - s.buf.WriteString(s.sep) - if s.prefix != nil { - // TODO: optimize by avoiding allocation. - s.appendString(string(*s.prefix) + key) - } else { - s.appendString(key) - } - if s.h.json { - s.buf.WriteByte(':') - } else { - s.buf.WriteByte('=') - } - s.sep = s.h.attrSep() -} - -func (s *handleState) appendString(str string) { - if s.h.json { - s.buf.WriteByte('"') - *s.buf = appendEscapedJSONString(*s.buf, str) - s.buf.WriteByte('"') - } else { - // text - if needsQuoting(str) { - *s.buf = strconv.AppendQuote(*s.buf, str) - } else { - s.buf.WriteString(str) - } - } -} - -func (s *handleState) appendValue(v Value) { - defer func() { - if r := recover(); r != nil { - // If it panics with a nil pointer, the most likely cases are - // an encoding.TextMarshaler or error fails to guard against nil, - // in which case "" seems to be the feasible choice. - // - // Adapted from the code in fmt/print.go. - if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() { - s.appendString("") - return - } - - // Otherwise just print the original panic message. - s.appendString(fmt.Sprintf("!PANIC: %v", r)) - } - }() - - var err error - if s.h.json { - err = appendJSONValue(s, v) - } else { - err = appendTextValue(s, v) - } - if err != nil { - s.appendError(err) - } -} - -func (s *handleState) appendTime(t time.Time) { - if s.h.json { - appendJSONTime(s, t) - } else { - writeTimeRFC3339Millis(s.buf, t) - } -} - -// This takes half the time of Time.AppendFormat. 
-func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) { - year, month, day := t.Date() - buf.WritePosIntWidth(year, 4) - buf.WriteByte('-') - buf.WritePosIntWidth(int(month), 2) - buf.WriteByte('-') - buf.WritePosIntWidth(day, 2) - buf.WriteByte('T') - hour, min, sec := t.Clock() - buf.WritePosIntWidth(hour, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(min, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(sec, 2) - ns := t.Nanosecond() - buf.WriteByte('.') - buf.WritePosIntWidth(ns/1e6, 3) - _, offsetSeconds := t.Zone() - if offsetSeconds == 0 { - buf.WriteByte('Z') - } else { - offsetMinutes := offsetSeconds / 60 - if offsetMinutes < 0 { - buf.WriteByte('-') - offsetMinutes = -offsetMinutes - } else { - buf.WriteByte('+') - } - buf.WritePosIntWidth(offsetMinutes/60, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(offsetMinutes%60, 2) - } -} diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go deleted file mode 100644 index 7786c166e..000000000 --- a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buffer provides a pool-allocated byte buffer. -package buffer - -import ( - "sync" -) - -// Buffer adapted from go/src/fmt/print.go -type Buffer []byte - -// Having an initial size gives a dramatic speedup. -var bufPool = sync.Pool{ - New: func() any { - b := make([]byte, 0, 1024) - return (*Buffer)(&b) - }, -} - -func New() *Buffer { - return bufPool.Get().(*Buffer) -} - -func (b *Buffer) Free() { - // To reduce peak allocation, return only smaller buffers to the pool. - const maxBufferSize = 16 << 10 - if cap(*b) <= maxBufferSize { - *b = (*b)[:0] - bufPool.Put(b) - } -} - -func (b *Buffer) Reset() { - *b = (*b)[:0] -} - -func (b *Buffer) Write(p []byte) (int, error) { - *b = append(*b, p...) - return len(p), nil -} - -func (b *Buffer) WriteString(s string) { - *b = append(*b, s...) -} - -func (b *Buffer) WriteByte(c byte) { - *b = append(*b, c) -} - -func (b *Buffer) WritePosInt(i int) { - b.WritePosIntWidth(i, 0) -} - -// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left -// by zeroes to the given width. Use a width of 0 to omit padding. -func (b *Buffer) WritePosIntWidth(i, width int) { - // Cheap integer to fixed-width decimal ASCII. - // Copied from log/log.go. - - if i < 0 { - panic("negative int") - } - - // Assemble decimal in reverse order. - var bb [20]byte - bp := len(bb) - 1 - for i >= 10 || width > 1 { - width-- - q := i / 10 - bb[bp] = byte('0' + i - q*10) - bp-- - i = q - } - // i < 10 - bb[bp] = byte('0' + i) - b.Write(bb[bp:]) -} - -func (b *Buffer) String() string { - return string(*b) -} diff --git a/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/vendor/golang.org/x/exp/slog/internal/ignorepc.go deleted file mode 100644 index d1256426f..000000000 --- a/vendor/golang.org/x/exp/slog/internal/ignorepc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// If IgnorePC is true, do not invoke runtime.Callers to get the pc. -// This is solely for benchmarking the slowdown from runtime.Callers. 
-var IgnorePC = false diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go deleted file mode 100644 index 157ada869..000000000 --- a/vendor/golang.org/x/exp/slog/json_handler.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "time" - "unicode/utf8" - - "golang.org/x/exp/slog/internal/buffer" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler struct { - *commonHandler -} - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - if opts == nil { - opts = &HandlerOptions{} - } - return &JSONHandler{ - &commonHandler{ - json: true, - w: w, - opts: *opts, - }, - } -} - -// Enabled reports whether the handler handles records at the given level. -// The handler ignores records whose level is lower. -func (h *JSONHandler) Enabled(_ context.Context, level Level) bool { - return h.commonHandler.enabled(level) -} - -// WithAttrs returns a new JSONHandler whose attributes consists -// of h's attributes followed by attrs. -func (h *JSONHandler) WithAttrs(attrs []Attr) Handler { - return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)} -} - -func (h *JSONHandler) WithGroup(name string) Handler { - return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)} -} - -// Handle formats its argument Record as a JSON object on a single line. -// -// If the Record's time is zero, the time is omitted. -// Otherwise, the key is "time" -// and the value is output as with json.Marshal. -// -// If the Record's level is zero, the level is omitted. -// Otherwise, the key is "level" -// and the value of [Level.String] is output. -// -// If the AddSource option is set and source information is available, -// the key is "source" -// and the value is output as "FILE:LINE". -// -// The message's key is "msg". -// -// To modify these or other attributes, or remove them from the output, use -// [HandlerOptions.ReplaceAttr]. -// -// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false), -// with two exceptions. -// -// First, an Attr whose Value is of type error is formatted as a string, by -// calling its Error method. Only errors in Attrs receive this special treatment, -// not errors embedded in structs, slices, maps or other data structures that -// are processed by the encoding/json package. -// -// Second, an encoding failure does not cause Handle to return an error. -// Instead, the error message is formatted as a string. -// -// Each call to Handle results in a single serialized call to io.Writer.Write. -func (h *JSONHandler) Handle(_ context.Context, r Record) error { - return h.commonHandler.handle(r) -} - -// Adapted from time.Time.MarshalJSON to avoid allocation. -func appendJSONTime(s *handleState, t time.Time) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- s.appendError(errors.New("time.Time year outside of range [0,9999]")) - } - s.buf.WriteByte('"') - *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano) - s.buf.WriteByte('"') -} - -func appendJSONValue(s *handleState, v Value) error { - switch v.Kind() { - case KindString: - s.appendString(v.str()) - case KindInt64: - *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10) - case KindUint64: - *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10) - case KindFloat64: - // json.Marshal is funny about floats; it doesn't - // always match strconv.AppendFloat. So just call it. - // That's expensive, but floats are rare. - if err := appendJSONMarshal(s.buf, v.Float64()); err != nil { - return err - } - case KindBool: - *s.buf = strconv.AppendBool(*s.buf, v.Bool()) - case KindDuration: - // Do what json.Marshal does. - *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10) - case KindTime: - s.appendTime(v.Time()) - case KindAny: - a := v.Any() - _, jm := a.(json.Marshaler) - if err, ok := a.(error); ok && !jm { - s.appendString(err.Error()) - } else { - return appendJSONMarshal(s.buf, a) - } - default: - panic(fmt.Sprintf("bad kind: %s", v.Kind())) - } - return nil -} - -func appendJSONMarshal(buf *buffer.Buffer, v any) error { - // Use a json.Encoder to avoid escaping HTML. - var bb bytes.Buffer - enc := json.NewEncoder(&bb) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - bs := bb.Bytes() - buf.Write(bs[:len(bs)-1]) // remove final newline - return nil -} - -// appendEscapedJSONString escapes s for JSON and appends it to buf. -// It does not surround the string in quotation marks. -// -// Modified from encoding/json/encode.go:encodeState.string, -// with escapeHTML set to false. -func appendEscapedJSONString(buf []byte, s string) []byte { - char := func(b byte) { buf = append(buf, b) } - str := func(s string) { buf = append(buf, s...) } - - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - str(s[start:i]) - } - char('\\') - switch b { - case '\\', '"': - char(b) - case '\n': - char('n') - case '\r': - char('r') - case '\t': - char('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - str(`u00`) - char(hex[b>>4]) - char(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - str(s[start:i]) - } - str(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - str(s[start:i]) - } - str(`\u202`) - char(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - str(s[start:]) - } - return buf -} - -var hex = "0123456789abcdef" - -// Copied from encoding/json/tables.go. -// -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
-var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} diff --git a/vendor/golang.org/x/exp/slog/level.go b/vendor/golang.org/x/exp/slog/level.go deleted file mode 100644 index b2365f0aa..000000000 --- a/vendor/golang.org/x/exp/slog/level.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "errors" - "fmt" - "strconv" - "strings" - "sync/atomic" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level int - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// - -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = -4 - LevelInfo Level = 0 - LevelWarn Level = 4 - LevelError Level = 8 -) - -// String returns a name for the level. -// If the level has a name, then that name -// in uppercase is returned. 
-// If the level is between named values, then -// an integer is appended to the uppercased name. -// Examples: -// -// LevelWarn.String() => "WARN" -// (LevelInfo+2).String() => "INFO+2" -func (l Level) String() string { - str := func(base string, val Level) string { - if val == 0 { - return base - } - return fmt.Sprintf("%s%+d", base, val) - } - - switch { - case l < LevelInfo: - return str("DEBUG", l-LevelDebug) - case l < LevelWarn: - return str("INFO", l-LevelInfo) - case l < LevelError: - return str("WARN", l-LevelWarn) - default: - return str("ERROR", l-LevelError) - } -} - -// MarshalJSON implements [encoding/json.Marshaler] -// by quoting the output of [Level.String]. -func (l Level) MarshalJSON() ([]byte, error) { - // AppendQuote is sufficient for JSON-encoding all Level strings. - // They don't contain any runes that would produce invalid JSON - // when escaped. - return strconv.AppendQuote(nil, l.String()), nil -} - -// UnmarshalJSON implements [encoding/json.Unmarshaler] -// It accepts any string produced by [Level.MarshalJSON], -// ignoring case. -// It also accepts numeric offsets that would result in a different string on -// output. For example, "Error-8" would marshal as "INFO". -func (l *Level) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { - return err - } - return l.parse(s) -} - -// MarshalText implements [encoding.TextMarshaler] -// by calling [Level.String]. -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText implements [encoding.TextUnmarshaler]. -// It accepts any string produced by [Level.MarshalText], -// ignoring case. -// It also accepts numeric offsets that would result in a different string on -// output. For example, "Error-8" would marshal as "INFO". -func (l *Level) UnmarshalText(data []byte) error { - return l.parse(string(data)) -} - -func (l *Level) parse(s string) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("slog: level string %q: %w", s, err) - } - }() - - name := s - offset := 0 - if i := strings.IndexAny(s, "+-"); i >= 0 { - name = s[:i] - offset, err = strconv.Atoi(s[i:]) - if err != nil { - return err - } - } - switch strings.ToUpper(name) { - case "DEBUG": - *l = LevelDebug - case "INFO": - *l = LevelInfo - case "WARN": - *l = LevelWarn - case "ERROR": - *l = LevelError - default: - return errors.New("unknown name") - } - *l += Level(offset) - return nil -} - -// Level returns the receiver. -// It implements Leveler. -func (l Level) Level() Level { return l } - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar struct { - val atomic.Int64 -} - -// Level returns v's level. -func (v *LevelVar) Level() Level { - return Level(int(v.val.Load())) -} - -// Set sets v's level to l. -func (v *LevelVar) Set(l Level) { - v.val.Store(int64(l)) -} - -func (v *LevelVar) String() string { - return fmt.Sprintf("LevelVar(%s)", v.Level()) -} - -// MarshalText implements [encoding.TextMarshaler] -// by calling [Level.MarshalText]. -func (v *LevelVar) MarshalText() ([]byte, error) { - return v.Level().MarshalText() -} - -// UnmarshalText implements [encoding.TextUnmarshaler] -// by calling [Level.UnmarshalText]. 
-func (v *LevelVar) UnmarshalText(data []byte) error { - var l Level - if err := l.UnmarshalText(data); err != nil { - return err - } - v.Set(l) - return nil -} - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler interface { - Level() Level -} diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go deleted file mode 100644 index e87ec9936..000000000 --- a/vendor/golang.org/x/exp/slog/logger.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "context" - "log" - "runtime" - "sync/atomic" - "time" - - "golang.org/x/exp/slog/internal" -) - -var defaultLogger atomic.Value - -func init() { - defaultLogger.Store(New(newDefaultHandler(log.Output))) -} - -// Default returns the default Logger. -func Default() *Logger { return defaultLogger.Load().(*Logger) } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - defaultLogger.Store(l) - // If the default's handler is a defaultHandler, then don't use a handleWriter, - // or we'll deadlock as they both try to acquire the log default mutex. - // The defaultHandler will use whatever the log default writer is currently - // set to, which is correct. - // This can occur with SetDefault(Default()). - // See TestSetDefault. - if _, ok := l.Handler().(*defaultHandler); !ok { - capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0 - log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC}) - log.SetFlags(0) // we want just the log message, no time or location - } -} - -// handlerWriter is an io.Writer that calls a Handler. -// It is used to link the default log.Logger to the default slog.Logger. -type handlerWriter struct { - h Handler - level Level - capturePC bool -} - -func (w *handlerWriter) Write(buf []byte) (int, error) { - if !w.h.Enabled(context.Background(), w.level) { - return 0, nil - } - var pc uintptr - if !internal.IgnorePC && w.capturePC { - // skip [runtime.Callers, w.Write, Logger.Output, log.Print] - var pcs [1]uintptr - runtime.Callers(4, pcs[:]) - pc = pcs[0] - } - - // Remove final newline. - origLen := len(buf) // Report that the entire buf was written. - if len(buf) > 0 && buf[len(buf)-1] == '\n' { - buf = buf[:len(buf)-1] - } - r := NewRecord(time.Now(), w.level, string(buf), pc) - return origLen, w.h.Handle(context.Background(), r) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger struct { - handler Handler // for structured logging -} - -func (l *Logger) clone() *Logger { - c := *l - return &c -} - -// Handler returns l's Handler. -func (l *Logger) Handler() Handler { return l.handler } - -// With returns a new Logger that includes the given arguments, converted to -// Attrs as in [Logger.Log]. 
-// The Attrs will be added to each output from the Logger. -// The new Logger shares the old Logger's context. -// The new Logger's handler is the result of calling WithAttrs on the receiver's -// handler. -func (l *Logger) With(args ...any) *Logger { - c := l.clone() - c.handler = l.handler.WithAttrs(argsToAttrSlice(args)) - return c -} - -// WithGroup returns a new Logger that starts a group. The keys of all -// attributes added to the Logger will be qualified by the given name. -// (How that qualification happens depends on the [Handler.WithGroup] -// method of the Logger's Handler.) -// The new Logger shares the old Logger's context. -// -// The new Logger's handler is the result of calling WithGroup on the receiver's -// handler. -func (l *Logger) WithGroup(name string) *Logger { - c := l.clone() - c.handler = l.handler.WithGroup(name) - return c - -} - -// New creates a new Logger with the given non-nil Handler and a nil context. -func New(h Handler) *Logger { - if h == nil { - panic("nil Handler") - } - return &Logger{handler: h} -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return Default().With(args...) -} - -// Enabled reports whether l emits log records at the given context and level. -func (l *Logger) Enabled(ctx context.Context, level Level) bool { - if ctx == nil { - ctx = context.Background() - } - return l.Handler().Enabled(ctx, level) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return log.New(&handlerWriter{h, level, true}, "", 0) -} - -// Log emits a log record with the current time and the given level and message. -// The Record's Attrs consist of the Logger's attributes followed by -// the Attrs specified by args. -// -// The attribute arguments are processed as follows: -// - If an argument is an Attr, it is used as is. -// - If an argument is a string and this is not the last argument, -// the following argument is treated as the value and the two are combined -// into an Attr. -// - Otherwise, the argument is treated as a value with key "!BADKEY". -func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) { - l.log(ctx, level, msg, args...) -} - -// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. -func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - l.logAttrs(ctx, level, msg, attrs...) -} - -// Debug logs at LevelDebug. -func (l *Logger) Debug(msg string, args ...any) { - l.log(nil, LevelDebug, msg, args...) -} - -// DebugContext logs at LevelDebug with the given context. -func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelDebug, msg, args...) -} - -// DebugCtx logs at LevelDebug with the given context. -// Deprecated: Use Logger.DebugContext. -func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelDebug, msg, args...) -} - -// Info logs at LevelInfo. -func (l *Logger) Info(msg string, args ...any) { - l.log(nil, LevelInfo, msg, args...) -} - -// InfoContext logs at LevelInfo with the given context. -func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelInfo, msg, args...) -} - -// InfoCtx logs at LevelInfo with the given context. -// Deprecated: Use Logger.InfoContext. 
-func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelInfo, msg, args...) -} - -// Warn logs at LevelWarn. -func (l *Logger) Warn(msg string, args ...any) { - l.log(nil, LevelWarn, msg, args...) -} - -// WarnContext logs at LevelWarn with the given context. -func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelWarn, msg, args...) -} - -// WarnCtx logs at LevelWarn with the given context. -// Deprecated: Use Logger.WarnContext. -func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelWarn, msg, args...) -} - -// Error logs at LevelError. -func (l *Logger) Error(msg string, args ...any) { - l.log(nil, LevelError, msg, args...) -} - -// ErrorContext logs at LevelError with the given context. -func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelError, msg, args...) -} - -// ErrorCtx logs at LevelError with the given context. -// Deprecated: Use Logger.ErrorContext. -func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelError, msg, args...) -} - -// log is the low-level logging method for methods that take ...any. -// It must always be called directly by an exported logging method -// or function, because it uses a fixed call depth to obtain the pc. -func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) { - if !l.Enabled(ctx, level) { - return - } - var pc uintptr - if !internal.IgnorePC { - var pcs [1]uintptr - // skip [runtime.Callers, this function, this function's caller] - runtime.Callers(3, pcs[:]) - pc = pcs[0] - } - r := NewRecord(time.Now(), level, msg, pc) - r.Add(args...) - if ctx == nil { - ctx = context.Background() - } - _ = l.Handler().Handle(ctx, r) -} - -// logAttrs is like [Logger.log], but for methods that take ...Attr. -func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - if !l.Enabled(ctx, level) { - return - } - var pc uintptr - if !internal.IgnorePC { - var pcs [1]uintptr - // skip [runtime.Callers, this function, this function's caller] - runtime.Callers(3, pcs[:]) - pc = pcs[0] - } - r := NewRecord(time.Now(), level, msg, pc) - r.AddAttrs(attrs...) - if ctx == nil { - ctx = context.Background() - } - _ = l.Handler().Handle(ctx, r) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - Default().log(nil, LevelDebug, msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelDebug, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - Default().log(nil, LevelInfo, msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelInfo, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - Default().log(nil, LevelWarn, msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelWarn, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - Default().log(nil, LevelError, msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. 
-func ErrorContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelError, msg, args...) -} - -// DebugCtx calls Logger.DebugContext on the default logger. -// Deprecated: call DebugContext. -func DebugCtx(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelDebug, msg, args...) -} - -// InfoCtx calls Logger.InfoContext on the default logger. -// Deprecated: call InfoContext. -func InfoCtx(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelInfo, msg, args...) -} - -// WarnCtx calls Logger.WarnContext on the default logger. -// Deprecated: call WarnContext. -func WarnCtx(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelWarn, msg, args...) -} - -// ErrorCtx calls Logger.ErrorContext on the default logger. -// Deprecated: call ErrorContext. -func ErrorCtx(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelError, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - Default().log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - Default().logAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/golang.org/x/exp/slog/noplog.bench b/vendor/golang.org/x/exp/slog/noplog.bench deleted file mode 100644 index ed9296ff6..000000000 --- a/vendor/golang.org/x/exp/slog/noplog.bench +++ /dev/null @@ -1,36 +0,0 @@ -goos: linux -goarch: amd64 -pkg: golang.org/x/exp/slog -cpu: Intel(R) Xeon(R) CPU @ 2.20GHz -BenchmarkNopLog/attrs-8 1000000 1090 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-8 1000000 1097 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-8 1000000 1078 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-8 1000000 1095 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-8 1000000 1096 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-parallel-8 4007268 308.2 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-parallel-8 4016138 299.7 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-parallel-8 4020529 305.9 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-parallel-8 3977829 303.4 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/attrs-parallel-8 3225438 318.5 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/keys-values-8 1179256 994.2 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/keys-values-8 1000000 1002 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/keys-values-8 1216710 993.2 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/keys-values-8 1000000 1013 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/keys-values-8 1000000 1016 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-8 989066 1163 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-8 994116 1163 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-8 1000000 1152 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-8 991675 1165 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-8 965268 1166 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-parallel-8 3955503 303.3 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-parallel-8 3861188 307.8 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-parallel-8 3967752 303.9 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-parallel-8 3955203 302.7 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/WithContext-parallel-8 3948278 301.1 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/Ctx-8 940622 1247 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/Ctx-8 936381 1257 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/Ctx-8 959730 1266 
ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/Ctx-8 943473 1290 ns/op 0 B/op 0 allocs/op -BenchmarkNopLog/Ctx-8 919414 1259 ns/op 0 B/op 0 allocs/op -PASS -ok golang.org/x/exp/slog 40.566s diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go deleted file mode 100644 index 38b3440f7..000000000 --- a/vendor/golang.org/x/exp/slog/record.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "runtime" - "time" - - "golang.org/x/exp/slices" -) - -const nAttrsInline = 5 - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Use [Record.Clone] to create a copy with no shared state. -type Record struct { - // The time at which the output method (Log, Info, etc.) was called. - Time time.Time - - // The log message. - Message string - - // The level of the event. - Level Level - - // The program counter at the time the record was constructed, as determined - // by runtime.Callers. If zero, no program counter is available. - // - // The only valid use for this value is as an argument to - // [runtime.CallersFrames]. In particular, it must not be passed to - // [runtime.FuncForPC]. - PC uintptr - - // Allocation optimization: an inline array sized to hold - // the majority of log calls (based on examination of open-source - // code). It holds the start of the list of Attrs. - front [nAttrsInline]Attr - - // The number of Attrs in front. - nFront int - - // The list of Attrs except for those in front. - // Invariants: - // - len(back) > 0 iff nFront == len(front) - // - Unused array elements are zero. Used to detect mistakes. - back []Attr -} - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return Record{ - Time: t, - Message: msg, - Level: level, - PC: pc, - } -} - -// Clone returns a copy of the record with no shared state. -// The original record and the clone can both be modified -// without interfering with each other. -func (r Record) Clone() Record { - r.back = slices.Clip(r.back) // prevent append from mutating shared array - return r -} - -// NumAttrs returns the number of attributes in the Record. -func (r Record) NumAttrs() int { - return r.nFront + len(r.back) -} - -// Attrs calls f on each Attr in the Record. -// Iteration stops if f returns false. -func (r Record) Attrs(f func(Attr) bool) { - for i := 0; i < r.nFront; i++ { - if !f(r.front[i]) { - return - } - } - for _, a := range r.back { - if !f(a) { - return - } - } -} - -// AddAttrs appends the given Attrs to the Record's list of Attrs. -func (r *Record) AddAttrs(attrs ...Attr) { - n := copy(r.front[r.nFront:], attrs) - r.nFront += n - // Check if a copy was modified by slicing past the end - // and seeing if the Attr there is non-zero. - if cap(r.back) > len(r.back) { - end := r.back[:len(r.back)+1][len(r.back)] - if !end.isEmpty() { - panic("copies of a slog.Record were both modified") - } - } - r.back = append(r.back, attrs[n:]...) -} - -// Add converts the args to Attrs as described in [Logger.Log], -// then appends the Attrs to the Record's list of Attrs. 
-func (r *Record) Add(args ...any) { - var a Attr - for len(args) > 0 { - a, args = argsToAttr(args) - if r.nFront < len(r.front) { - r.front[r.nFront] = a - r.nFront++ - } else { - if r.back == nil { - r.back = make([]Attr, 0, countAttrs(args)) - } - r.back = append(r.back, a) - } - } - -} - -// countAttrs returns the number of Attrs that would be created from args. -func countAttrs(args []any) int { - n := 0 - for i := 0; i < len(args); i++ { - n++ - if _, ok := args[i].(string); ok { - i++ - } - } - return n -} - -const badKey = "!BADKEY" - -// argsToAttr turns a prefix of the nonempty args slice into an Attr -// and returns the unconsumed portion of the slice. -// If args[0] is an Attr, it returns it. -// If args[0] is a string, it treats the first two elements as -// a key-value pair. -// Otherwise, it treats args[0] as a value with a missing key. -func argsToAttr(args []any) (Attr, []any) { - switch x := args[0].(type) { - case string: - if len(args) == 1 { - return String(badKey, x), nil - } - return Any(x, args[1]), args[2:] - - case Attr: - return x, args[1:] - - default: - return Any(badKey, x), args[1:] - } -} - -// Source describes the location of a line of source code. -type Source struct { - // Function is the package path-qualified function name containing the - // source line. If non-empty, this string uniquely identifies a single - // function in the program. This may be the empty string if not known. - Function string `json:"function"` - // File and Line are the file name and line number (1-based) of the source - // line. These may be the empty string and zero, respectively, if not known. - File string `json:"file"` - Line int `json:"line"` -} - -// attrs returns the non-zero fields of s as a slice of attrs. -// It is similar to a LogValue method, but we don't want Source -// to implement LogValuer because it would be resolved before -// the ReplaceAttr function was called. -func (s *Source) group() Value { - var as []Attr - if s.Function != "" { - as = append(as, String("function", s.Function)) - } - if s.File != "" { - as = append(as, String("file", s.File)) - } - if s.Line != 0 { - as = append(as, Int("line", s.Line)) - } - return GroupValue(as...) -} - -// source returns a Source for the log event. -// If the Record was created without the necessary information, -// or if the location is unavailable, it returns a non-nil *Source -// with zero fields. -func (r Record) source() *Source { - fs := runtime.CallersFrames([]uintptr{r.PC}) - f, _ := fs.Next() - return &Source{ - Function: f.Function, - File: f.File, - Line: f.Line, - } -} diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go deleted file mode 100644 index 75b66b716..000000000 --- a/vendor/golang.org/x/exp/slog/text_handler.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "context" - "encoding" - "fmt" - "io" - "reflect" - "strconv" - "unicode" - "unicode/utf8" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler struct { - *commonHandler -} - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. 
-func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - if opts == nil { - opts = &HandlerOptions{} - } - return &TextHandler{ - &commonHandler{ - json: false, - w: w, - opts: *opts, - }, - } -} - -// Enabled reports whether the handler handles records at the given level. -// The handler ignores records whose level is lower. -func (h *TextHandler) Enabled(_ context.Context, level Level) bool { - return h.commonHandler.enabled(level) -} - -// WithAttrs returns a new TextHandler whose attributes consists -// of h's attributes followed by attrs. -func (h *TextHandler) WithAttrs(attrs []Attr) Handler { - return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)} -} - -func (h *TextHandler) WithGroup(name string) Handler { - return &TextHandler{commonHandler: h.commonHandler.withGroup(name)} -} - -// Handle formats its argument Record as a single line of space-separated -// key=value items. -// -// If the Record's time is zero, the time is omitted. -// Otherwise, the key is "time" -// and the value is output in RFC3339 format with millisecond precision. -// -// If the Record's level is zero, the level is omitted. -// Otherwise, the key is "level" -// and the value of [Level.String] is output. -// -// If the AddSource option is set and source information is available, -// the key is "source" and the value is output as FILE:LINE. -// -// The message's key is "msg". -// -// To modify these or other attributes, or remove them from the output, use -// [HandlerOptions.ReplaceAttr]. -// -// If a value implements [encoding.TextMarshaler], the result of MarshalText is -// written. Otherwise, the result of fmt.Sprint is written. -// -// Keys and values are quoted with [strconv.Quote] if they contain Unicode space -// characters, non-printing characters, '"' or '='. -// -// Keys inside groups consist of components (keys or group names) separated by -// dots. No further escaping is performed. -// Thus there is no way to determine from the key "a.b.c" whether there -// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c", -// or single group "a" and a key "b.c". -// If it is necessary to reconstruct the group structure of a key -// even in the presence of dots inside components, use -// [HandlerOptions.ReplaceAttr] to encode that information in the key. -// -// Each call to Handle results in a single serialized call to -// io.Writer.Write. -func (h *TextHandler) Handle(_ context.Context, r Record) error { - return h.commonHandler.handle(r) -} - -func appendTextValue(s *handleState, v Value) error { - switch v.Kind() { - case KindString: - s.appendString(v.str()) - case KindTime: - s.appendTime(v.time()) - case KindAny: - if tm, ok := v.any.(encoding.TextMarshaler); ok { - data, err := tm.MarshalText() - if err != nil { - return err - } - // TODO: avoid the conversion to string. - s.appendString(string(data)) - return nil - } - if bs, ok := byteSlice(v.any); ok { - // As of Go 1.19, this only allocates for strings longer than 32 bytes. - s.buf.WriteString(strconv.Quote(string(bs))) - return nil - } - s.appendString(fmt.Sprintf("%+v", v.Any())) - default: - *s.buf = v.append(*s.buf) - } - return nil -} - -// byteSlice returns its argument as a []byte if the argument's -// underlying type is []byte, along with a second return value of true. -// Otherwise it returns nil, false. 
-func byteSlice(a any) ([]byte, bool) { - if bs, ok := a.([]byte); ok { - return bs, true - } - // Like Printf's %s, we allow both the slice type and the byte element type to be named. - t := reflect.TypeOf(a) - if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - return reflect.ValueOf(a).Bytes(), true - } - return nil, false -} - -func needsQuoting(s string) bool { - if len(s) == 0 { - return true - } - for i := 0; i < len(s); { - b := s[i] - if b < utf8.RuneSelf { - // Quote anything except a backslash that would need quoting in a - // JSON string, as well as space and '=' - if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) { - return true - } - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) { - return true - } - i += size - } - return false -} diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go deleted file mode 100644 index 3550c46fc..000000000 --- a/vendor/golang.org/x/exp/slog/value.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "fmt" - "math" - "runtime" - "strconv" - "strings" - "time" - "unsafe" - - "golang.org/x/exp/slices" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value struct { - _ [0]func() // disallow == - // num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration, - // the string length for KindString, and nanoseconds since the epoch for KindTime. - num uint64 - // If any is of type Kind, then the value is in num as described above. - // If any is of type *time.Location, then the Kind is Time and time.Time value - // can be constructed from the Unix nanos in num and the location (monotonic time - // is not preserved). - // If any is of type stringptr, then the Kind is String and the string value - // consists of the length in num and the pointer in any. - // Otherwise, the Kind is Any and any is the value. - // (This implies that Attrs cannot store values of type Kind, *time.Location - // or stringptr.) - any any -} - -// Kind is the kind of a Value. -type Kind int - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. - -const ( - KindAny Kind = iota - KindBool - KindDuration - KindFloat64 - KindInt64 - KindString - KindTime - KindUint64 - KindGroup - KindLogValuer -) - -var kindStrings = []string{ - "Any", - "Bool", - "Duration", - "Float64", - "Int64", - "String", - "Time", - "Uint64", - "Group", - "LogValuer", -} - -func (k Kind) String() string { - if k >= 0 && int(k) < len(kindStrings) { - return kindStrings[k] - } - return "" -} - -// Unexported version of Kind, just so we can store Kinds in Values. -// (No user-provided value has this type.) -type kind Kind - -// Kind returns v's Kind. -func (v Value) Kind() Kind { - switch x := v.any.(type) { - case Kind: - return x - case stringptr: - return KindString - case timeLocation: - return KindTime - case groupptr: - return KindGroup - case LogValuer: - return KindLogValuer - case kind: // a kind is just a wrapper for a Kind - return KindAny - default: - return KindAny - } -} - -//////////////// Constructors - -// IntValue returns a Value for an int. 
-func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return Value{num: uint64(v), any: KindInt64} -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return Value{num: v, any: KindUint64} -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return Value{num: math.Float64bits(v), any: KindFloat64} -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - u := uint64(0) - if v { - u = 1 - } - return Value{num: u, any: KindBool} -} - -// Unexported version of *time.Location, just so we can store *time.Locations in -// Values. (No user-provided value has this type.) -type timeLocation *time.Location - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - if v.IsZero() { - // UnixNano on the zero time is undefined, so represent the zero time - // with a nil *time.Location instead. time.Time.Location method never - // returns nil, so a Value with any == timeLocation(nil) cannot be - // mistaken for any other Value, time.Time or otherwise. - return Value{any: timeLocation(nil)} - } - return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())} -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return Value{num: uint64(v.Nanoseconds()), any: KindDuration} -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. -// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - switch v := v.(type) { - case string: - return StringValue(v) - case int: - return Int64Value(int64(v)) - case uint: - return Uint64Value(uint64(v)) - case int64: - return Int64Value(v) - case uint64: - return Uint64Value(v) - case bool: - return BoolValue(v) - case time.Duration: - return DurationValue(v) - case time.Time: - return TimeValue(v) - case uint8: - return Uint64Value(uint64(v)) - case uint16: - return Uint64Value(uint64(v)) - case uint32: - return Uint64Value(uint64(v)) - case uintptr: - return Uint64Value(uint64(v)) - case int8: - return Int64Value(int64(v)) - case int16: - return Int64Value(int64(v)) - case int32: - return Int64Value(int64(v)) - case float64: - return Float64Value(v) - case float32: - return Float64Value(float64(v)) - case []Attr: - return GroupValue(v...) - case Kind: - return Value{any: kind(v)} - case Value: - return v - default: - return Value{any: v} - } -} - -//////////////// Accessors - -// Any returns v's value as an any. 
-func (v Value) Any() any { - switch v.Kind() { - case KindAny: - if k, ok := v.any.(kind); ok { - return Kind(k) - } - return v.any - case KindLogValuer: - return v.any - case KindGroup: - return v.group() - case KindInt64: - return int64(v.num) - case KindUint64: - return v.num - case KindFloat64: - return v.float() - case KindString: - return v.str() - case KindBool: - return v.bool() - case KindDuration: - return v.duration() - case KindTime: - return v.time() - default: - panic(fmt.Sprintf("bad kind: %s", v.Kind())) - } -} - -// Int64 returns v's value as an int64. It panics -// if v is not a signed integer. -func (v Value) Int64() int64 { - if g, w := v.Kind(), KindInt64; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - return int64(v.num) -} - -// Uint64 returns v's value as a uint64. It panics -// if v is not an unsigned integer. -func (v Value) Uint64() uint64 { - if g, w := v.Kind(), KindUint64; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - return v.num -} - -// Bool returns v's value as a bool. It panics -// if v is not a bool. -func (v Value) Bool() bool { - if g, w := v.Kind(), KindBool; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - return v.bool() -} - -func (v Value) bool() bool { - return v.num == 1 -} - -// Duration returns v's value as a time.Duration. It panics -// if v is not a time.Duration. -func (v Value) Duration() time.Duration { - if g, w := v.Kind(), KindDuration; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - - return v.duration() -} - -func (v Value) duration() time.Duration { - return time.Duration(int64(v.num)) -} - -// Float64 returns v's value as a float64. It panics -// if v is not a float64. -func (v Value) Float64() float64 { - if g, w := v.Kind(), KindFloat64; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - - return v.float() -} - -func (v Value) float() float64 { - return math.Float64frombits(v.num) -} - -// Time returns v's value as a time.Time. It panics -// if v is not a time.Time. -func (v Value) Time() time.Time { - if g, w := v.Kind(), KindTime; g != w { - panic(fmt.Sprintf("Value kind is %s, not %s", g, w)) - } - return v.time() -} - -func (v Value) time() time.Time { - loc := v.any.(timeLocation) - if loc == nil { - return time.Time{} - } - return time.Unix(0, int64(v.num)).In(loc) -} - -// LogValuer returns v's value as a LogValuer. It panics -// if v is not a LogValuer. -func (v Value) LogValuer() LogValuer { - return v.any.(LogValuer) -} - -// Group returns v's value as a []Attr. -// It panics if v's Kind is not KindGroup. -func (v Value) Group() []Attr { - if sp, ok := v.any.(groupptr); ok { - return unsafe.Slice((*Attr)(sp), v.num) - } - panic("Group: bad kind") -} - -func (v Value) group() []Attr { - return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num) -} - -//////////////// Other - -// Equal reports whether v and w represent the same Go value. 
-func (v Value) Equal(w Value) bool { - k1 := v.Kind() - k2 := w.Kind() - if k1 != k2 { - return false - } - switch k1 { - case KindInt64, KindUint64, KindBool, KindDuration: - return v.num == w.num - case KindString: - return v.str() == w.str() - case KindFloat64: - return v.float() == w.float() - case KindTime: - return v.time().Equal(w.time()) - case KindAny, KindLogValuer: - return v.any == w.any // may panic if non-comparable - case KindGroup: - return slices.EqualFunc(v.group(), w.group(), Attr.Equal) - default: - panic(fmt.Sprintf("bad kind: %s", k1)) - } -} - -// append appends a text representation of v to dst. -// v is formatted as with fmt.Sprint. -func (v Value) append(dst []byte) []byte { - switch v.Kind() { - case KindString: - return append(dst, v.str()...) - case KindInt64: - return strconv.AppendInt(dst, int64(v.num), 10) - case KindUint64: - return strconv.AppendUint(dst, v.num, 10) - case KindFloat64: - return strconv.AppendFloat(dst, v.float(), 'g', -1, 64) - case KindBool: - return strconv.AppendBool(dst, v.bool()) - case KindDuration: - return append(dst, v.duration().String()...) - case KindTime: - return append(dst, v.time().String()...) - case KindGroup: - return fmt.Append(dst, v.group()) - case KindAny, KindLogValuer: - return fmt.Append(dst, v.any) - default: - panic(fmt.Sprintf("bad kind: %s", v.Kind())) - } -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. -type LogValuer interface { - LogValue() Value -} - -const maxLogValues = 100 - -// Resolve repeatedly calls LogValue on v while it implements LogValuer, -// and returns the result. -// If v resolves to a group, the group's attributes' values are not recursively -// resolved. -// If the number of LogValue calls exceeds a threshold, a Value containing an -// error is returned. -// Resolve's return value is guaranteed not to be of Kind KindLogValuer. -func (v Value) Resolve() (rv Value) { - orig := v - defer func() { - if r := recover(); r != nil { - rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5))) - } - }() - - for i := 0; i < maxLogValues; i++ { - if v.Kind() != KindLogValuer { - return v - } - v = v.LogValuer().LogValue() - } - err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any()) - return AnyValue(err) -} - -func stack(skip, nFrames int) string { - pcs := make([]uintptr, nFrames+1) - n := runtime.Callers(skip+1, pcs) - if n == 0 { - return "(no stack)" - } - frames := runtime.CallersFrames(pcs[:n]) - var b strings.Builder - i := 0 - for { - frame, more := frames.Next() - fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line) - if !more { - break - } - i++ - if i >= nFrames { - fmt.Fprintf(&b, "(rest of stack elided)\n") - break - } - } - return b.String() -} diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go deleted file mode 100644 index 29b0d7329..000000000 --- a/vendor/golang.org/x/exp/slog/value_119.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.19 && !go1.20 - -package slog - -import ( - "reflect" - "unsafe" -) - -type ( - stringptr unsafe.Pointer // used in Value.any when the Value is a string - groupptr unsafe.Pointer // used in Value.any when the Value is a []Attr -) - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&value)) - return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)} -} - -func (v Value) str() string { - var s string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) - hdr.Data = uintptr(v.any.(stringptr)) - hdr.Len = int(v.num) - return s -} - -// String returns Value's value as a string, formatted like fmt.Sprint. Unlike -// the methods Int64, Float64, and so on, which panic if v is of the -// wrong kind, String never panics. -func (v Value) String() string { - if sp, ok := v.any.(stringptr); ok { - // Inlining this code makes a huge difference. - var s string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) - hdr.Data = uintptr(sp) - hdr.Len = int(v.num) - return s - } - return string(v.append(nil)) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as)) - return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)} -} diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go deleted file mode 100644 index f7d4c0932..000000000 --- a/vendor/golang.org/x/exp/slog/value_120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package slog - -import "unsafe" - -type ( - stringptr *byte // used in Value.any when the Value is a string - groupptr *Attr // used in Value.any when the Value is a []Attr -) - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))} -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))} -} - -// String returns Value's value as a string, formatted like fmt.Sprint. Unlike -// the methods Int64, Float64, and so on, which panic if v is of the -// wrong kind, String never panics. -func (v Value) String() string { - if sp, ok := v.any.(stringptr); ok { - return unsafe.String(sp, v.num) - } - return string(v.append(nil)) -} - -func (v Value) str() string { - return unsafe.String(v.any.(stringptr), v.num) -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6c18ea230..ea5ae629f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
 package http2 // import "golang.org/x/net/http2"
 
 import (
diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
deleted file mode 100644
index 73687de74..000000000
--- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.5
-
-package plan9
-
-import "syscall"
-
-func fixwd() {
-	syscall.Fixwd()
-}
-
-func Getwd() (wd string, err error) {
-	return syscall.Getwd()
-}
-
-func Chdir(path string) error {
-	return syscall.Chdir(path)
-}
diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go
index fb9458218..7a76489db 100644
--- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go
+++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go
@@ -2,22 +2,18 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !go1.5
-
 package plan9
 
+import "syscall"
+
 func fixwd() {
+	syscall.Fixwd()
 }
 
 func Getwd() (wd string, err error) {
-	fd, err := open(".", O_RDONLY)
-	if err != nil {
-		return "", err
-	}
-	defer Close(fd)
-	return Fd2path(fd)
+	return syscall.Getwd()
 }
 
 func Chdir(path string) error {
-	return chdir(path)
+	return syscall.Chdir(path)
 }
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81acd..3c7a6d6e2 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,9 +38,7 @@ func SchedSetaffinity(pid int, set *CPUSet) error {
 
 // Zero clears the set s, so that it contains no CPUs.
 func (s *CPUSet) Zero() {
-	for i := range s {
-		s[i] = 0
-	}
+	clear(s[:])
 }
 
 func cpuBitsIndex(cpu int) int {
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6c3..d1c8b2640 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -349,6 +349,9 @@ struct ltchars {
 #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
 #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
 
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
 '
 
 includes_NetBSD='
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 798f61ad3..7838ca5db 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
 	return
 }
 
-// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 const minIovec = 8
 
 func Readv(fd int, iovs [][]byte) (n int, err error) {
-	if !darwinKernelVersionMin(11, 0, 0) {
-		return 0, ENOSYS
-	}
-
 	iovecs := make([]Iovec, 0, minIovec)
 	iovecs = appendBytes(iovecs, iovs)
 	n, err = readv(fd, iovecs)
@@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) {
 }
 
 func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
-	if !darwinKernelVersionMin(11, 0, 0) {
-		return 0, ENOSYS
-	}
 	iovecs := make([]Iovec, 0, minIovec)
 	iovecs = appendBytes(iovecs, iovs)
 	n, err = preadv(fd, iovecs, offset)
@@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
 }
 
 func Writev(fd int, iovs [][]byte) (n int, err error) {
-	if !darwinKernelVersionMin(11, 0, 0) {
-		return 0, ENOSYS
-	}
-
 	iovecs := make([]Iovec, 0, minIovec)
 	iovecs = appendBytes(iovecs, iovs)
 	if raceenabled {
@@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
 }
 
 func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
-	if !darwinKernelVersionMin(11, 0, 0) {
-		return 0, ENOSYS
-	}
-
 	iovecs := make([]Iovec, 0, minIovec)
 	iovecs = appendBytes(iovecs, iovs)
 	if raceenabled {
@@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
 	}
 }
 
-func darwinMajorMinPatch() (maj, min, patch int, err error) {
-	var un Utsname
-	err = Uname(&un)
-	if err != nil {
-		return
-	}
-
-	var mmp [3]int
-	c := 0
-Loop:
-	for _, b := range un.Release[:] {
-		switch {
-		case b >= '0' && b <= '9':
-			mmp[c] = 10*mmp[c] + int(b-'0')
-		case b == '.':
-			c++
-			if c > 2 {
-				return 0, 0, 0, ENOTSUP
-			}
-		case b == 0:
-			break Loop
-		default:
-			return 0, 0, 0, ENOTSUP
-		}
-	}
-	if c != 2 {
-		return 0, 0, 0, ENOTSUP
-	}
-	return mmp[0], mmp[1], mmp[2], nil
-}
-
-func darwinKernelVersionMin(maj, min, patch int) bool {
-	actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
-	if err != nil {
-		return false
-	}
-	return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
-}
-
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
 
 //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff
--git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index abc395547..18a3d9bda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9e7a6c5a4..b6db27d93 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -328,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -492,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -528,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -555,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -844,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2025-01-17)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x31 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -937,9 +942,6 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_FAMILY_NAME = "ethtool" @@ -1213,6 +1215,7 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 @@ -1231,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1255,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1274,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1582,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND 
= 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1633,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1695,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1817,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2493,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2652,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2732,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2982,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -3336,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xf + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3406,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3530,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3574,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3688,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a8c421e29..1c37f9fbc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 
0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9a88d1813..6f54d34ae 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 7cb6a867e..783ec5c12 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d0ecd2c58..ca83d3ba1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 7a2940ae0..607e611c0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d14ca8f2e..b9cb5bd3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 2da1bac1e..65b078a63 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC 
= 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 28727514b..5298a3033 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f287b54b..7bc557c87 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 7e5f9e6aa..152399bb0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 37c87952f..1a1ce2409 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 522013361..4231a1fb5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 4bfe2b5b6..21c0e9526 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( 
SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3cffb869..f00d1cd7c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c219c8db3..bc8d539e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index c6545413c..b4609c20c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -221,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -371,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff30..aca56ee49 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 
5eb450695..2ea1ef58c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e502974..d22c8af31 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec51..5ee264ae9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a1..f9f03ebf5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a336..87c2118e8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b996227..391ad102f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9e..565615775 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc2..0482b52e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfbb..71806f08f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go 
index b44636502..e35a71058 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c18..2aea47670 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 840539169..6c9bb4e56 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790d..680bc9915 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f9..620f27105 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8bcac2835..944e75a11 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -115,7 +115,9 @@ type Statx_t struct { Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 Dio_read_offset_align uint32 - _ [9]uint64 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -629,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -686,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -737,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -2317,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2597,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - 
SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3044,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3078,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -4044,7 +4093,7 @@ const ( ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 ETHTOOL_A_TSINFO_STATS = 0x6 ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 - ETHTOOL_A_TSINFO_MAX = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4130,6 +4179,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4780,7 +4842,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x150 + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 @@ -5414,7 +5476,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5530,7 +5592,7 @@ const ( NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 62db85f6c..485f2d3a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,19 +282,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -330,17 +324,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - 
Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -348,10 +336,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 7d89d648d..ecbd1ad8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -300,16 +300,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -344,27 +338,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9c0b39eec..02f0463a4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,19 +273,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -321,17 +315,11 @@ type 
Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -339,10 +327,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index de9c7ff36..6f4d400d2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -279,16 +279,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -323,27 +317,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2336bd2bf..cd532cfa5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -280,16 +280,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -324,27 +318,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 
Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4711f0be1..413362085 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ab99a34b9..eaa37eb71 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total 
uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 04c9866e3..98ae6a1e4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 60aa69f61..cae196159 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - 
Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index cb4fad785..6ce3b4e02 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,19 +285,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,17 +327,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -351,10 +339,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 60272cfce..c7429c6a1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ 
type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3f5b91bc0..4bf4baf4c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 51550f15a..e9709d70a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -307,16 +307,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -351,27 +345,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total 
uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3239e50e0..fb44268ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -302,16 +302,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -346,27 +340,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index faf200278..9c38265c7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -284,16 +284,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -328,27 +322,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total 
uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 958bcf47a..993a2297d 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1976,6 +1976,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index a58bc48b8..641a5f4b7 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -546,25 +546,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := 
syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -574,7 +574,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -586,7 +586,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -594,7 +594,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -602,7 +602,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), 
uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -610,7 +610,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -618,7 +618,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -626,7 +626,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -634,7 +634,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -642,7 +642,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -650,7 +650,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +658,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -675,7 +675,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -683,7 +683,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -691,7 +691,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -703,7 +703,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), 
uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +711,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -720,7 +720,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -728,7 +728,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -736,7 +736,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -744,7 +744,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := 
syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -752,7 +752,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -760,7 +760,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -768,7 +768,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -776,7 +776,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -784,7 +784,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -792,13 +792,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func 
EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } @@ -806,7 +806,7 @@ func FreeSid(sid *SID) (err error) { } func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { - r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) if r1 == 0 { err = errnoErr(e1) } @@ -814,7 +814,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { } func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -829,7 +829,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -837,7 +837,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -853,7 +853,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -867,7 +867,7 @@ func 
getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -876,7 +876,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -886,7 +886,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -895,7 +895,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -911,7 +911,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -921,7 +921,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -929,25 +929,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func 
getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -955,7 +955,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -963,7 +963,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -979,7 +979,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -987,7 +987,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -996,25 +996,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := 
syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1022,7 +1022,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1030,7 +1030,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1038,7 +1038,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, 
uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1046,7 +1046,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1054,7 +1054,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1062,7 +1062,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1070,7 +1070,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1079,7 +1079,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), 
uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1092,7 +1092,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1100,7 +1100,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1108,7 +1108,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1120,7 +1120,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1128,7 +1128,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1136,7 +1136,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1144,7 +1144,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, 
e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1152,7 +1152,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1160,7 +1160,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1176,7 +1176,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1184,7 +1184,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1192,7 +1192,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), 
uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1200,7 +1200,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1208,7 +1208,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1217,7 +1217,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1226,7 +1226,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1234,7 +1234,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := 
syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1242,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1250,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1267,7 +1267,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1275,7 +1275,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1291,7 +1291,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1303,7 +1303,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1315,7 +1315,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1323,7 +1323,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1336,7 +1336,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1344,7 +1344,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1352,7 +1352,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1360,7 +1360,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1368,7 +1368,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := 
syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1376,7 +1376,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1384,7 +1384,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1392,7 +1392,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,7 +1400,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1408,7 +1408,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1417,7 +1417,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1425,13 +1425,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func 
CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1440,7 +1440,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1449,7 +1449,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1458,18 +1458,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), 
uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1477,7 +1477,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1485,13 +1485,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1500,7 +1500,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1509,7 +1509,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1521,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, 
uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1530,7 +1530,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1538,7 +1538,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1546,7 +1546,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1554,7 +1554,7 @@ func CryptQueryObject(objectType uint32, object 
unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1562,7 +1562,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1571,7 +1571,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1586,7 +1586,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1594,12 +1594,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1607,7 +1607,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := 
syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1615,7 +1615,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1623,7 +1623,7 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { } func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1631,7 +1631,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1639,7 +1639,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1647,7 +1647,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1655,7 +1655,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1663,7 +1663,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1675,7 +1675,7 @@ 
func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1687,7 +1687,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1695,7 +1695,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1704,7 +1704,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1712,7 +1712,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1720,7 +1720,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1728,7 +1728,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1736,7 +1736,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1744,7 +1744,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1752,12 +1752,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1765,7 +1765,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1773,7 +1773,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1782,7 +1782,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1791,7 +1791,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1800,7 +1800,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) 
(handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1809,7 +1809,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1817,7 +1817,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1826,7 +1826,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1835,7 +1835,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1848,7 +1848,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1857,7 +1857,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, 
defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1866,7 +1866,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1878,7 +1878,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1886,7 +1886,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1894,7 +1894,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1902,7 +1902,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1911,7 +1911,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1919,7 +1919,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1927,12 +1927,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1940,7 +1940,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1948,7 +1948,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1960,7 +1960,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1968,7 +1968,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1976,12 +1976,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1990,7 +1990,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +1998,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2019,7 +2019,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2028,7 +2028,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2037,7 +2037,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), 
uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2046,7 +2046,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2055,7 +2055,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2063,7 +2063,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2071,7 +2071,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2079,7 +2079,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2087,7 +2087,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2096,7 +2096,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2104,7 +2104,7 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err 
error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2112,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2120,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2132,7 +2132,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2141,7 +2141,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2149,7 +2149,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2157,7 +2157,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2165,19 +2165,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), 
uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2185,7 +2185,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2193,7 +2193,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2201,13 +2201,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2215,7 +2215,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2223,7 +2223,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { } func GetConsoleCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2232,7 +2232,7 @@ func GetConsoleCP() (cp uint32, err error) { } func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } @@ -2240,7 +2240,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { } func GetConsoleOutputCP() (cp uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) cp = uint32(r0) if cp == 0 { err = errnoErr(e1) @@ -2249,7 +2249,7 @@ func GetConsoleOutputCP() (cp uint32, err error) { } func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2257,7 +2257,7 @@ func 
GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2266,19 +2266,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2286,13 +2286,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2301,7 +2301,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2310,7 +2310,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2318,7 +2318,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), 
uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2326,7 +2326,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2335,7 +2335,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2343,7 +2343,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2351,7 +2351,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2359,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2368,7 +2368,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2377,7 +2377,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), 
uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2386,13 +2386,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2400,7 +2400,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2409,7 +2409,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2418,7 +2418,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2427,13 +2427,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2442,7 +2442,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) if r1 == 0 { err = errnoErr(e1) } @@ -2450,7 +2450,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } 
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2458,7 +2458,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2466,7 +2466,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } @@ -2474,7 +2474,7 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 } func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2486,7 +2486,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2494,7 +2494,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2512,7 +2512,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc 
uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2521,7 +2521,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2530,7 +2530,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2538,7 +2538,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2546,7 +2546,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2554,12 +2554,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := 
syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2567,7 +2567,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2576,12 +2576,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2590,7 +2590,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2599,7 +2599,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2607,17 +2607,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2626,7 +2626,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen 
uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2635,7 +2635,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2643,13 +2643,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2658,7 +2658,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2666,7 +2666,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2675,7 +2675,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), 
uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2683,7 +2683,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2691,7 +2691,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2699,7 +2699,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2707,7 +2707,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2715,7 +2715,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 
:= syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2724,7 +2724,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2736,7 +2736,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2749,7 +2749,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2766,7 +2766,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2784,7 +2784,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2793,7 +2793,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2802,7 +2802,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2811,7 +2811,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2820,7 +2820,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2828,7 +2828,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2837,7 +2837,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2846,7 +2846,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2854,7 +2854,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2862,7 +2862,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -2870,7 +2870,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2878,7 
+2878,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2891,7 +2891,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2904,7 +2904,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2917,7 +2917,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2930,7 +2930,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2939,7 +2939,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2947,7 +2947,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2955,7 +2955,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err 
error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2963,7 +2963,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2971,7 +2971,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2979,7 +2979,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2987,7 +2987,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2996,7 +2996,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -3004,7 +3004,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -3012,7 +3012,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), 
uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -3024,7 +3024,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -3036,7 +3036,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3044,7 +3044,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -3052,7 +3052,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -3060,7 +3060,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3068,7 +3068,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -3076,7 +3076,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3084,7 +3084,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -3092,7 +3092,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -3101,7 +3101,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3109,7 +3109,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3117,7 +3117,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3125,7 +3125,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -3133,7 +3133,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func SetConsoleCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3141,7 +3141,7 @@ func SetConsoleCP(cp uint32) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3149,7 +3149,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) if r1 == 0 { err = errnoErr(e1) } @@ -3157,7 +3157,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetConsoleOutputCP(cp uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 
1, uintptr(cp), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3165,7 +3165,7 @@ func SetConsoleOutputCP(cp uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3173,7 +3173,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3190,7 +3190,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3198,7 +3198,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3206,7 +3206,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3214,13 +3214,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3228,7 +3228,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3236,7 +3236,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3244,7 +3244,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, 
_, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3252,7 +3252,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3261,7 +3261,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3269,7 +3269,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3277,7 +3277,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3285,7 +3285,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3294,7 +3294,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), 
uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3302,7 +3302,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3314,7 +3314,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3322,7 +3322,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3330,7 +3330,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3338,7 +3338,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3346,7 +3346,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3354,7 +3354,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3362,7 +3362,7 @@ func 
SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3370,7 +3370,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3383,13 +3383,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3397,7 +3397,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3405,7 +3405,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3413,7 +3413,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3421,7 +3421,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3429,7 +3429,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3 } func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, 
uintptr(addr), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr)) if r1 == 0 { err = errnoErr(e1) } @@ -3437,7 +3437,7 @@ func UnmapViewOfFile(addr uintptr) (err error) { } func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize))) if r1 == 0 { err = errnoErr(e1) } @@ -3445,7 +3445,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, } func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect)) value = uintptr(r0) if value == 0 { err = errnoErr(e1) @@ -3454,7 +3454,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3 } func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype)) if r1 == 0 { err = errnoErr(e1) } @@ -3462,7 +3462,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { } func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3470,7 +3470,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) { } func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect))) if r1 == 0 { err = errnoErr(e1) } @@ -3478,7 +3478,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect } func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect))) if r1 == 0 { err = errnoErr(e1) } @@ -3486,7 +3486,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect } func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, 
length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3494,7 +3494,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt } func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3502,7 +3502,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat } func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -3510,13 +3510,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) { } func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr()) sessionID = uint32(r0) return } func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3528,7 +3528,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil if waitAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3537,7 +3537,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil } func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds)) event = uint32(r0) if event == 0xffffffff { err = errnoErr(e1) @@ -3546,7 +3546,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, } func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), 
uintptr(unsafe.Pointer(reserved))) if r1 == 0 { err = errnoErr(e1) } @@ -3558,7 +3558,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3566,7 +3566,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) } func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten))) if r1 == 0 { err = errnoErr(e1) } @@ -3574,7 +3574,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size } func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3582,12 +3582,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32 } func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen))) return } func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags)) if r1 == 0 { err = 
errnoErr(e1) } @@ -3595,7 +3595,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint } func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3603,7 +3603,7 @@ func NetApiBufferFree(buf *byte) (neterr error) { } func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3611,7 +3611,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete } func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { - r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3619,7 +3619,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr } func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf))) if r0 != 0 { neterr = syscall.Errno(r0) } @@ -3627,7 +3627,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by } func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3635,7 +3635,7 @@ func NtCreateFile(handle *Handle, 
access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO } func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3643,7 +3643,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i } func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3651,7 +3651,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe } func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3659,7 +3659,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf } func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3667,7 +3667,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, } func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), 
uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3675,7 +3675,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P } func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3683,13 +3683,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL } func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) ret = r0 != 0 return } func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3697,13 +3697,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { } func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable))) ret = r0 != 0 return } func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3711,7 +3711,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile } func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3719,18 +3719,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString } func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + r0, _, _ := 
syscall.SyscallN(procRtlGetCurrentPeb.Addr()) peb = (*PEB)(unsafe.Pointer(r0)) return } func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) return } func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info))) if r0 != 0 { ntstatus = NTStatus(r0) } @@ -3738,23 +3738,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { } func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString))) return } func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus)) ret = syscall.Errno(r0) return } func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3762,7 +3762,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { } func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3770,7 +3770,7 @@ func coCreateGuid(pguid *GUID) (ret error) { } func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3778,7 +3778,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable * } func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { - r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), 
uintptr(reserved), uintptr(coInit)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3786,23 +3786,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { } func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address)) return } func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + syscall.SyscallN(procCoUninitialize.Addr()) return } func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) chars = int32(r0) return } func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -3810,7 +3810,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin } func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag)) if r1 == 0 { err = errnoErr(e1) } @@ -3818,7 +3818,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u } func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -3826,7 +3826,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err } func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3834,7 +3834,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin } func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), 
uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -3842,7 +3842,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u } func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3850,7 +3850,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb } func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb)) if r1 == 0 { err = errnoErr(e1) } @@ -3862,7 +3862,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb if ret != nil { return } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -3874,12 +3874,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { if err != nil { return } - syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription)) return } func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3887,7 +3887,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er } func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { err = errnoErr(e1) } @@ -3895,7 +3895,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint } func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), 
uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3903,7 +3903,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3911,7 +3911,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf } func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3919,7 +3919,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { } func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3927,7 +3927,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu } func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -3935,7 +3935,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz } func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3944,7 +3944,7 @@ func 
setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN } func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3952,7 +3952,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI } func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet)) if r1 == 0 { err = errnoErr(e1) } @@ -3960,7 +3960,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { } func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) if r1 == 0 { err = errnoErr(e1) } @@ -3968,7 +3968,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3976,7 +3976,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo } func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -3984,7 +3984,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d } func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, 
uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) handle = DevInfo(r0) if handle == DevInfo(InvalidHandle) { err = errnoErr(e1) @@ -3993,7 +3993,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp } func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4001,7 +4001,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData))) if r1 == 0 { err = errnoErr(e1) } @@ -4009,7 +4009,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa } func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4017,7 +4017,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4025,7 +4025,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func 
setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -4033,7 +4033,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4041,7 +4041,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4049,7 +4049,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa } func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4057,7 +4057,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func 
setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4065,7 +4065,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) key = Handle(r0) if key == InvalidHandle { err = errnoErr(e1) @@ -4074,7 +4074,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc } func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4082,7 +4082,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo } func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) if r1 == 0 { err = errnoErr(e1) } @@ -4090,7 +4090,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf } func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize)) if r1 == 0 { err = errnoErr(e1) } @@ -4098,7 +4098,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev } func 
SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4106,7 +4106,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) if r1 == 0 { err = errnoErr(e1) } @@ -4114,7 +4114,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData } func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) if r1 == 0 { err = errnoErr(e1) } @@ -4122,7 +4122,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er } func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc))) argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) @@ -4131,7 +4131,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { } func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4139,7 +4139,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u } func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) if r1 <= 32 { err = errnoErr(e1) } @@ -4147,12 +4147,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui } func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), 
uintptr(param)) + syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param)) return } func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param)) if r1 == 0 { err = errnoErr(e1) } @@ -4160,7 +4160,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { } func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -4168,7 +4168,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) { } func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) copied = int32(r0) if copied == 0 { err = errnoErr(e1) @@ -4177,19 +4177,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e } func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr()) hwnd = HWND(r0) return } func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr()) hwnd = HWND(r0) return } func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -4197,19 +4197,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { } func GetKeyboardLayout(tid uint32) (hkl Handle) { - r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid)) hkl = Handle(r0) return } func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr()) shellWindow = HWND(r0) return } func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid))) tid = uint32(r0) if tid == 0 { err = errnoErr(e1) @@ -4218,25 +4218,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { } func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd)) isWindow = r0 != 0 return } func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd)) isUnicode = r0 
!= 0 return } func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd)) isVisible = r0 != 0 return } func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags)) hkl = Handle(r0) if hkl == 0 { err = errnoErr(e1) @@ -4245,7 +4245,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { } func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype)) ret = int32(r0) if ret == 0 { err = errnoErr(e1) @@ -4254,13 +4254,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i } func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { - r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl)) ret = int32(r0) return } func UnloadKeyboardLayout(hkl Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl)) if r1 == 0 { err = errnoErr(e1) } @@ -4272,7 +4272,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( if inheritExisting { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -4280,7 +4280,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) ( } func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block))) if r1 == 0 { err = errnoErr(e1) } @@ -4288,7 +4288,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) { } func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) if r1 == 0 { err = errnoErr(e1) } @@ -4305,7 +4305,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32 } func 
_GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle))) bufSize = uint32(r0) if bufSize == 0 { err = errnoErr(e1) @@ -4323,7 +4323,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u } func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer)) if r1 == 0 { err = errnoErr(e1) } @@ -4340,7 +4340,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer } func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -4348,7 +4348,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint } func TimeBeginPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4356,7 +4356,7 @@ func TimeBeginPeriod(period uint32) (err error) { } func TimeEndPeriod(period uint32) (err error) { - r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period)) if r1 != 0 { err = errnoErr(e1) } @@ -4364,7 +4364,7 @@ func TimeEndPeriod(period uint32) (err error) { } func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -4372,12 +4372,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) } func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo))) return } func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), 
uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4385,7 +4385,7 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul } func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr()) if r1 == socket_error { err = errnoErr(e1) } @@ -4393,7 +4393,7 @@ func WSACleanup() (err error) { } func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { - r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) if r1 != 0 { err = errnoErr(e1) } @@ -4401,7 +4401,7 @@ func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err } func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4414,7 +4414,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -4422,7 +4422,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f } func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == socket_error { err = errnoErr(e1) } @@ -4430,7 +4430,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo } func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) if r1 == socket_error { err = errnoErr(e1) } @@ -4438,7 +4438,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) } func WSALookupServiceEnd(handle 
Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle)) if r1 == socket_error { err = errnoErr(e1) } @@ -4446,7 +4446,7 @@ func WSALookupServiceEnd(handle Handle) (err error) { } func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet))) if r1 == socket_error { err = errnoErr(e1) } @@ -4454,7 +4454,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS } func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4462,7 +4462,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32 } func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4470,7 +4470,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui } func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4478,7 +4478,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, } func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped 
*Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) if r1 == socket_error { err = errnoErr(e1) } @@ -4486,7 +4486,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32 } func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4495,7 +4495,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, } func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data))) if r0 != 0 { sockerr = syscall.Errno(r0) } @@ -4503,7 +4503,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { } func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4511,7 +4511,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { } func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s)) if r1 == socket_error { err = errnoErr(e1) } @@ -4519,7 +4519,7 @@ func Closesocket(s Handle) (err error) { } func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4536,7 +4536,7 @@ func GetHostByName(name string) (h *Hostent, err error) { } func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name))) h = (*Hostent)(unsafe.Pointer(r0)) if h == nil { err = errnoErr(e1) @@ -4545,7 +4545,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) { } func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := 
syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4562,7 +4562,7 @@ func GetProtoByName(name string) (p *Protoent, err error) { } func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name))) p = (*Protoent)(unsafe.Pointer(r0)) if p == nil { err = errnoErr(e1) @@ -4585,7 +4585,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4594,7 +4594,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4602,7 +4602,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4610,7 +4610,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4618,7 +4618,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4628,7 +4628,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4641,7 +4641,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), 
uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4649,7 +4649,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4657,7 +4657,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4665,7 +4665,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4674,7 +4674,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4682,12 +4682,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index df6bf948e..0ddd81c02 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. 
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index 13e9a64ad..bddb2e2ae 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/vendor/golang.org/x/text/feature/plural/common.go b/vendor/golang.org/x/text/feature/plural/common.go new file mode 100644 index 000000000..fdcb373fd --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/common.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// Form defines a plural form. +// +// Not all languages support all forms. Also, the meaning of each form varies +// per language. It is important to note that the name of a form does not +// necessarily correspond one-to-one with the set of numbers. For instance, +// for Croation, One matches not only 1, but also 11, 21, etc. +// +// Each language must at least support the form "other". +type Form byte + +const ( + Other Form = iota + Zero + One + Two + Few + Many +) + +var countMap = map[string]Form{ + "other": Other, + "zero": Zero, + "one": One, + "two": Two, + "few": Few, + "many": Many, +} + +type pluralCheck struct { + // category: + // 3..7: opID + // 0..2: category + cat byte + setID byte +} + +// opID identifies the type of operand in the plural rule, being i, n or f. +// (v, w, and t are treated as filters in our implementation.) +type opID byte + +const ( + opMod opID = 0x1 // is '%' used? 
+ opNotEqual opID = 0x2 // using "!=" to compare + opI opID = 0 << 2 // integers after taking the absolute value + opN opID = 1 << 2 // full number (must be integer) + opF opID = 2 << 2 // fraction + opV opID = 3 << 2 // number of visible digits + opW opID = 4 << 2 // number of visible digits without trailing zeros + opBretonM opID = 5 << 2 // hard-wired rule for Breton + opItalian800 opID = 6 << 2 // hard-wired rule for Italian + opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan +) +const ( + // Use this plural form to indicate the next rule needs to match as well. + // The last condition in the list will have the correct plural form. + andNext = 0x7 + formMask = 0x7 + + opShift = 3 + + // numN indicates the maximum integer, or maximum mod value, for which we + // have inclusion masks. + numN = 100 + // The common denominator of the modulo that is taken. + maxMod = 100 +) diff --git a/vendor/golang.org/x/text/feature/plural/message.go b/vendor/golang.org/x/text/feature/plural/message.go new file mode 100644 index 000000000..56d518cc3 --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/message.go @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "fmt" + "io" + "reflect" + "strconv" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// TODO: consider deleting this interface. Maybe VisibleDigits is always +// sufficient and practical. + +// Interface is used for types that can determine their own plural form. +type Interface interface { + // PluralForm reports the plural form for the given language of the + // underlying value. It also returns the integer value. If the integer value + // is larger than fits in n, PluralForm may return a value modulo + // 10,000,000. + PluralForm(t language.Tag, scale int) (f Form, n int) +} + +// Selectf returns the first case for which its selector is a match for the +// arg-th substitution argument to a formatting call, formatting it as indicated +// by format. +// +// The cases argument are pairs of selectors and messages. Selectors are of type +// string or Form. Messages are of type string or catalog.Message. A selector +// matches an argument if: +// - it is "other" or Other +// - it matches the plural form of the argument: "zero", "one", "two", "few", +// or "many", or the equivalent Form +// - it is of the form "=x" where x is an integer that matches the value of +// the argument. 
+// - it is of the form " kindDefault { + e.EncodeUint(uint64(m.scale)) + } + + forms := validForms(cardinal, e.Language()) + + for i := 0; i < len(m.cases); { + if err := compileSelector(e, forms, m.cases[i]); err != nil { + return err + } + if i++; i >= len(m.cases) { + return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1]) + } + var msg catalog.Message + switch x := m.cases[i].(type) { + case string: + msg = catalog.String(x) + case catalog.Message: + msg = x + default: + return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x) + } + if err := e.EncodeMessage(msg); err != nil { + return err + } + i++ + } + return nil +} + +func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error { + form := Other + switch x := selector.(type) { + case string: + if x == "" { + return fmt.Errorf("plural: empty selector") + } + if c := x[0]; c == '=' || c == '<' { + val, err := strconv.ParseUint(x[1:], 10, 16) + if err != nil { + return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err) + } + e.EncodeUint(uint64(c)) + e.EncodeUint(val) + return nil + } + var ok bool + form, ok = countMap[x] + if !ok { + return fmt.Errorf("plural: invalid plural form %q", selector) + } + case Form: + form = x + default: + return fmt.Errorf("plural: selector of type %T; want string or Form", selector) + } + + ok := false + for _, f := range valid { + if f == form { + ok = true + break + } + } + if !ok { + return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language()) + } + e.EncodeUint(uint64(form)) + return nil +} + +func execute(d *catmsg.Decoder) bool { + lang := d.Language() + argN := int(d.DecodeUint()) + kind := int(d.DecodeUint()) + scale := -1 // default + if kind > kindDefault { + scale = int(d.DecodeUint()) + } + form := Other + n := -1 + if arg := d.Arg(argN); arg == nil { + // Default to Other. + } else if x, ok := arg.(number.VisibleDigits); ok { + d := x.Digits(nil, lang, scale) + form, n = cardinal.matchDisplayDigits(lang, &d) + } else if x, ok := arg.(Interface); ok { + // This covers lists and formatters from the number package. + form, n = x.PluralForm(lang, scale) + } else { + var f number.Formatter + switch kind { + case kindScale: + f.InitDecimal(lang) + f.SetScale(scale) + case kindScientific: + f.InitScientific(lang) + f.SetScale(scale) + case kindPrecision: + f.InitDecimal(lang) + f.SetPrecision(scale) + case kindDefault: + // sensible default + f.InitDecimal(lang) + if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr { + f.SetScale(0) + } else { + f.SetScale(2) + } + } + var dec number.Decimal // TODO: buffer in Printer + dec.Convert(f.RoundingContext, arg) + v := number.FormatDigits(&dec, f.RoundingContext) + if !v.NaN && !v.Inf { + form, n = cardinal.matchDisplayDigits(d.Language(), &v) + } + } + for !d.Done() { + f := d.DecodeUint() + if (f == '=' && n == int(d.DecodeUint())) || + (f == '<' && 0 <= n && n < int(d.DecodeUint())) || + form == Form(f) || + Other == Form(f) { + return d.ExecuteMessage() + } + d.SkipMessage() + } + return false +} diff --git a/vendor/golang.org/x/text/feature/plural/plural.go b/vendor/golang.org/x/text/feature/plural/plural.go new file mode 100644 index 000000000..e9f2d42e0 --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/plural.go @@ -0,0 +1,262 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
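A minimal usage sketch for the Selectf API added above, as it is normally driven through the public golang.org/x/text/message package; the message key and case strings below are illustrative and not taken from this patch:

    package main

    import (
        "golang.org/x/text/feature/plural"
        "golang.org/x/text/language"
        "golang.org/x/text/message"
    )

    func main() {
        // Register a plural-aware translation; Selectf picks the case whose
        // selector matches the first substitution argument.
        message.Set(language.English, "You have %d new message(s).",
            plural.Selectf(1, "",
                plural.One, "You have one new message.",
                plural.Other, "You have %d new messages."))

        p := message.NewPrinter(language.English)
        p.Printf("You have %d new message(s).", 1) // You have one new message.
        p.Println()
        p.Printf("You have %d new message(s).", 7) // You have 7 new messages.
        p.Println()
    }
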
+ +//go:generate go run gen.go gen_common.go + +// Package plural provides utilities for handling linguistic plurals in text. +// +// The definitions in this package are based on the plural rule handling defined +// in CLDR. See +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for +// details. +package plural + +import ( + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" +) + +// Rules defines the plural rules for all languages for a certain plural type. +// +// This package is UNDER CONSTRUCTION and its API may change. +type Rules struct { + rules []pluralCheck + index []byte + langToIndex []byte + inclusionMasks []uint64 +} + +var ( + // Cardinal defines the plural rules for numbers indicating quantities. + Cardinal *Rules = cardinal + + // Ordinal defines the plural rules for numbers indicating position + // (first, second, etc.). + Ordinal *Rules = ordinal + + ordinal = &Rules{ + ordinalRules, + ordinalIndex, + ordinalLangToIndex, + ordinalInclusionMasks[:], + } + + cardinal = &Rules{ + cardinalRules, + cardinalIndex, + cardinalLangToIndex, + cardinalInclusionMasks[:], + } +) + +// getIntApprox converts the digits in slice digits[start:end] to an integer +// according to the following rules: +// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed +// to be zero. +// - Result n is big if i / 10^nMod > 1. +// - Otherwise the result is i % 10^nMod. +// +// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result +// for various values of nMod is: +// - when nMod == 2, n == big +// - when nMod == 3, n == big +// - when nMod == 4, n == big +// - when nMod == 5, n == 12300 +// - when nMod == 6, n == 12300 +// - when nMod == 7, n == 12300 +func getIntApprox(digits []byte, start, end, nMod, big int) (n int) { + // Leading 0 digits just result in 0. + p := start + if p < 0 { + p = 0 + } + // Range only over the part for which we have digits. + mid := end + if mid >= len(digits) { + mid = len(digits) + } + // Check digits more significant that nMod. + if q := end - nMod; q > 0 { + if q > mid { + q = mid + } + for ; p < q; p++ { + if digits[p] != 0 { + return big + } + } + } + for ; p < mid; p++ { + n = 10*n + int(digits[p]) + } + // Multiply for trailing zeros. + for ; p < end; p++ { + n *= 10 + } + return n +} + +// MatchDigits computes the plural form for the given language and the given +// decimal floating point digits. The digits are stored in big-endian order and +// are of value byte(0) - byte(9). The floating point position is indicated by +// exp and the number of visible decimals is scale. All leading and trailing +// zeros may be omitted from digits. +// +// The following table contains examples of possible arguments to represent +// the given numbers. +// +// decimal digits exp scale +// 123 []byte{1, 2, 3} 3 0 +// 123.4 []byte{1, 2, 3, 4} 3 1 +// 123.40 []byte{1, 2, 3, 4} 3 2 +// 100000 []byte{1} 6 0 +// 100000.00 []byte{1} 6 3 +func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form { + index := tagToID(t) + + // Differentiate up to including mod 1000000 for the integer part. + n := getIntApprox(digits, 0, exp, 6, 1000000) + + // Differentiate up to including mod 100 for the fractional part. 
+ f := getIntApprox(digits, exp, exp+scale, 2, 100) + + return matchPlural(p, index, n, f, scale) +} + +func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) { + n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000) + return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n +} + +func validForms(p *Rules, t language.Tag) (forms []Form) { + offset := p.langToIndex[tagToID(t)] + rules := p.rules[p.index[offset]:p.index[offset+1]] + + forms = append(forms, Other) + last := Other + for _, r := range rules { + if cat := Form(r.cat & formMask); cat != andNext && last != cat { + forms = append(forms, cat) + last = cat + } + } + return forms +} + +func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form { + return matchPlural(p, tagToID(t), n, f, scale) +} + +// MatchPlural returns the plural form for the given language and plural +// operands (as defined in +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules): +// +// where +// n absolute value of the source number (integer and decimals) +// input +// i integer digits of n. +// v number of visible fraction digits in n, with trailing zeros. +// w number of visible fraction digits in n, without trailing zeros. +// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w)) +// t visible fractional digits in n, without trailing zeros. +// +// If any of the operand values is too large to fit in an int, it is okay to +// pass the value modulo 10,000,000. +func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form { + return matchPlural(p, tagToID(lang), i, f, v) +} + +func matchPlural(p *Rules, index compact.ID, n, f, v int) Form { + nMask := p.inclusionMasks[n%maxMod] + // Compute the fMask inline in the rules below, as it is relatively rare. + // fMask := p.inclusionMasks[f%maxMod] + vMask := p.inclusionMasks[v%maxMod] + + // Do the matching + offset := p.langToIndex[index] + rules := p.rules[p.index[offset]:p.index[offset+1]] + for i := 0; i < len(rules); i++ { + rule := rules[i] + setBit := uint64(1 << rule.setID) + var skip bool + switch op := opID(rule.cat >> opShift); op { + case opI: // i = x + skip = n >= numN || nMask&setBit == 0 + + case opI | opNotEqual: // i != x + skip = n < numN && nMask&setBit != 0 + + case opI | opMod: // i % m = x + skip = nMask&setBit == 0 + + case opI | opMod | opNotEqual: // i % m != x + skip = nMask&setBit != 0 + + case opN: // n = x + skip = f != 0 || n >= numN || nMask&setBit == 0 + + case opN | opNotEqual: // n != x + skip = f == 0 && n < numN && nMask&setBit != 0 + + case opN | opMod: // n % m = x + skip = f != 0 || nMask&setBit == 0 + + case opN | opMod | opNotEqual: // n % m != x + skip = f == 0 && nMask&setBit != 0 + + case opF: // f = x + skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opNotEqual: // f != x + skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0 + + case opF | opMod: // f % m = x + skip = p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opMod | opNotEqual: // f % m != x + skip = p.inclusionMasks[f%maxMod]&setBit != 0 + + case opV: // v = x + skip = v < numN && vMask&setBit == 0 + + case opV | opNotEqual: // v != x + skip = v < numN && vMask&setBit != 0 + + case opW: // w == 0 + skip = f != 0 + + case opW | opNotEqual: // w != 0 + skip = f == 0 + + // Hard-wired rules that cannot be handled by our algorithm. 
+ + case opBretonM: + skip = f != 0 || n == 0 || n%1000000 != 0 + + case opAzerbaijan00s: + // 100,200,300,400,500,600,700,800,900 + skip = n == 0 || n >= 1000 || n%100 != 0 + + case opItalian800: + skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800 + } + if skip { + // advance over AND entries. + for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ { + } + continue + } + // return if we have a final entry. + if cat := rule.cat & formMask; cat != andNext { + return Form(cat) + } + } + return Other +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/feature/plural/tables.go b/vendor/golang.org/x/text/feature/plural/tables.go new file mode 100644 index 000000000..b06b9cb4e --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/tables.go @@ -0,0 +1,552 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var ordinalRules = []pluralCheck{ // 64 elements + 0: {cat: 0x2f, setID: 0x4}, + 1: {cat: 0x3a, setID: 0x5}, + 2: {cat: 0x22, setID: 0x1}, + 3: {cat: 0x22, setID: 0x6}, + 4: {cat: 0x22, setID: 0x7}, + 5: {cat: 0x2f, setID: 0x8}, + 6: {cat: 0x3c, setID: 0x9}, + 7: {cat: 0x2f, setID: 0xa}, + 8: {cat: 0x3c, setID: 0xb}, + 9: {cat: 0x2c, setID: 0xc}, + 10: {cat: 0x24, setID: 0xd}, + 11: {cat: 0x2d, setID: 0xe}, + 12: {cat: 0x2d, setID: 0xf}, + 13: {cat: 0x2f, setID: 0x10}, + 14: {cat: 0x35, setID: 0x3}, + 15: {cat: 0xc5, setID: 0x11}, + 16: {cat: 0x2, setID: 0x1}, + 17: {cat: 0x5, setID: 0x3}, + 18: {cat: 0xd, setID: 0x12}, + 19: {cat: 0x22, setID: 0x1}, + 20: {cat: 0x2f, setID: 0x13}, + 21: {cat: 0x3d, setID: 0x14}, + 22: {cat: 0x2f, setID: 0x15}, + 23: {cat: 0x3a, setID: 0x16}, + 24: {cat: 0x2f, setID: 0x17}, + 25: {cat: 0x3b, setID: 0x18}, + 26: {cat: 0x2f, setID: 0xa}, + 27: {cat: 0x3c, setID: 0xb}, + 28: {cat: 0x22, setID: 0x1}, + 29: {cat: 0x23, setID: 0x19}, + 30: {cat: 0x24, setID: 0x1a}, + 31: {cat: 0x22, setID: 0x1b}, + 32: {cat: 0x23, setID: 0x2}, + 33: {cat: 0x24, setID: 0x1a}, + 34: {cat: 0xf, setID: 0x15}, + 35: {cat: 0x1a, setID: 0x16}, + 36: {cat: 0xf, setID: 0x17}, + 37: {cat: 0x1b, setID: 0x18}, + 38: {cat: 0xf, setID: 0x1c}, + 39: {cat: 0x1d, setID: 0x1d}, + 40: {cat: 0xa, setID: 0x1e}, + 41: {cat: 0xa, setID: 0x1f}, + 42: {cat: 0xc, setID: 0x20}, + 43: {cat: 0xe4, setID: 0x0}, + 44: {cat: 0x5, setID: 0x3}, + 45: {cat: 0xd, setID: 0xe}, + 46: {cat: 0xd, setID: 0x21}, + 47: {cat: 0x22, setID: 0x1}, + 48: {cat: 0x23, setID: 0x19}, + 49: {cat: 0x24, setID: 0x1a}, + 50: {cat: 0x25, setID: 0x22}, + 51: {cat: 0x22, setID: 0x23}, + 52: {cat: 0x23, setID: 0x19}, + 53: {cat: 0x24, setID: 0x1a}, + 54: {cat: 0x25, setID: 0x22}, + 55: {cat: 0x22, setID: 0x24}, + 56: {cat: 0x23, setID: 0x19}, + 57: {cat: 0x24, setID: 0x1a}, + 58: {cat: 0x25, setID: 0x22}, + 59: {cat: 0x21, setID: 0x25}, + 60: {cat: 0x22, setID: 0x1}, + 61: {cat: 0x23, setID: 0x2}, + 62: {cat: 0x24, setID: 0x26}, + 63: {cat: 0x25, setID: 0x27}, +} // Size: 152 bytes + +var ordinalIndex = []uint8{ // 22 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09, + 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22, + 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40, +} // Size: 46 bytes + +var ordinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, + 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + // Entry C0 - FF + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x13, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c, +} // Size: 799 bytes + +var ordinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581, + 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001, + 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031, + 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001, + 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001, + 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011, + // Entry 20 - 3F + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + // Entry 40 - 5F + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + // Entry 60 - 7F + 
0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, +} // Size: 824 bytes + +// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets + +var cardinalRules = []pluralCheck{ // 166 elements + 0: {cat: 0x2, setID: 0x3}, + 1: {cat: 0x22, setID: 0x1}, + 2: {cat: 0x2, setID: 0x4}, + 3: {cat: 0x2, setID: 0x4}, + 4: {cat: 0x7, setID: 0x1}, + 5: {cat: 0x62, setID: 0x3}, + 6: {cat: 0x22, setID: 0x4}, + 7: {cat: 0x7, setID: 0x3}, + 8: {cat: 0x42, setID: 0x1}, + 9: {cat: 0x22, setID: 0x4}, + 10: {cat: 0x22, setID: 0x4}, + 11: {cat: 0x22, setID: 0x5}, + 12: {cat: 0x22, setID: 0x1}, + 13: {cat: 0x22, setID: 0x1}, + 14: {cat: 0x7, setID: 0x4}, + 15: {cat: 0x92, setID: 0x3}, + 16: {cat: 0xf, setID: 0x6}, + 17: {cat: 0x1f, setID: 0x7}, + 18: {cat: 0x82, setID: 0x3}, + 19: {cat: 0x92, setID: 0x3}, + 20: {cat: 0xf, setID: 0x6}, + 21: {cat: 0x62, setID: 0x3}, + 22: {cat: 0x4a, setID: 0x6}, + 23: {cat: 0x7, setID: 0x8}, + 24: {cat: 0x62, setID: 0x3}, + 25: {cat: 0x1f, setID: 0x9}, + 26: {cat: 0x62, setID: 0x3}, + 27: {cat: 0x5f, setID: 0x9}, + 28: {cat: 0x72, setID: 0x3}, + 29: {cat: 0x29, setID: 0xa}, + 30: {cat: 0x29, setID: 0xb}, + 31: {cat: 0x4f, setID: 0xb}, + 32: {cat: 0x61, setID: 0x2}, + 33: {cat: 0x2f, setID: 0x6}, + 34: {cat: 0x3a, setID: 0x7}, + 35: {cat: 0x4f, setID: 0x6}, + 36: {cat: 0x5f, setID: 0x7}, + 37: {cat: 0x62, setID: 0x2}, + 38: {cat: 0x4f, setID: 0x6}, + 39: {cat: 0x72, setID: 0x2}, + 40: {cat: 0x21, setID: 0x3}, + 41: {cat: 0x7, setID: 0x4}, + 42: {cat: 0x32, setID: 0x3}, + 43: {cat: 0x21, setID: 0x3}, + 44: {cat: 0x22, setID: 0x1}, + 45: {cat: 0x22, setID: 0x1}, + 46: {cat: 0x23, setID: 0x2}, + 47: {cat: 0x2, setID: 0x3}, + 48: {cat: 0x22, setID: 0x1}, + 49: {cat: 0x24, setID: 0xc}, + 50: {cat: 0x7, setID: 0x1}, + 51: {cat: 0x62, setID: 0x3}, + 52: {cat: 0x74, setID: 0x3}, + 53: {cat: 0x24, setID: 0x3}, + 54: {cat: 0x2f, setID: 0xd}, + 55: {cat: 0x34, setID: 0x1}, + 56: {cat: 0xf, setID: 0x6}, + 57: {cat: 0x1f, setID: 0x7}, + 58: {cat: 0x62, setID: 0x3}, + 59: {cat: 0x4f, setID: 0x6}, + 60: {cat: 0x5a, setID: 0x7}, + 61: {cat: 0xf, setID: 0xe}, + 62: {cat: 0x1f, setID: 0xf}, + 63: {cat: 0x64, setID: 0x3}, + 64: {cat: 0x4f, setID: 0xe}, + 65: {cat: 0x5c, setID: 0xf}, + 66: {cat: 0x22, setID: 0x10}, + 67: {cat: 0x23, setID: 0x11}, + 68: {cat: 0x24, setID: 0x12}, + 69: {cat: 0xf, setID: 0x1}, + 70: {cat: 0x62, setID: 0x3}, + 71: {cat: 0xf, setID: 0x2}, + 72: {cat: 0x63, setID: 0x3}, + 73: {cat: 0xf, setID: 0x13}, + 74: {cat: 0x64, setID: 0x3}, + 75: {cat: 0x74, setID: 0x3}, + 76: {cat: 0xf, setID: 0x1}, + 77: {cat: 0x62, setID: 0x3}, + 78: {cat: 0x4a, setID: 0x1}, + 79: {cat: 0xf, setID: 0x2}, + 80: {cat: 0x63, setID: 0x3}, + 81: {cat: 0x4b, setID: 0x2}, + 82: {cat: 0xf, setID: 0x13}, + 83: {cat: 0x64, setID: 0x3}, + 84: {cat: 0x4c, setID: 0x13}, + 85: {cat: 0x7, setID: 0x1}, + 86: {cat: 0x62, setID: 0x3}, + 87: {cat: 0x7, setID: 0x2}, + 88: {cat: 0x63, setID: 0x3}, + 89: {cat: 0x2f, setID: 0xa}, + 90: {cat: 0x37, setID: 0x14}, + 91: {cat: 0x65, setID: 0x3}, + 92: {cat: 0x7, setID: 0x1}, + 93: {cat: 0x62, setID: 0x3}, + 94: {cat: 0x7, setID: 0x15}, + 95: {cat: 0x64, setID: 0x3}, + 96: {cat: 0x75, setID: 0x3}, + 97: {cat: 0x7, setID: 0x1}, + 98: {cat: 0x62, setID: 0x3}, + 99: {cat: 0xf, setID: 0xe}, + 100: {cat: 0x1f, setID: 0xf}, + 101: {cat: 0x64, setID: 0x3}, + 102: {cat: 0xf, setID: 0x16}, + 103: {cat: 0x17, setID: 0x1}, + 104: {cat: 0x65, setID: 0x3}, + 105: {cat: 0xf, setID: 0x17}, + 106: {cat: 0x65, setID: 0x3}, + 107: {cat: 0xf, 
setID: 0xf}, + 108: {cat: 0x65, setID: 0x3}, + 109: {cat: 0x2f, setID: 0x6}, + 110: {cat: 0x3a, setID: 0x7}, + 111: {cat: 0x2f, setID: 0xe}, + 112: {cat: 0x3c, setID: 0xf}, + 113: {cat: 0x2d, setID: 0xa}, + 114: {cat: 0x2d, setID: 0x17}, + 115: {cat: 0x2d, setID: 0x18}, + 116: {cat: 0x2f, setID: 0x6}, + 117: {cat: 0x3a, setID: 0xb}, + 118: {cat: 0x2f, setID: 0x19}, + 119: {cat: 0x3c, setID: 0xb}, + 120: {cat: 0x55, setID: 0x3}, + 121: {cat: 0x22, setID: 0x1}, + 122: {cat: 0x24, setID: 0x3}, + 123: {cat: 0x2c, setID: 0xc}, + 124: {cat: 0x2d, setID: 0xb}, + 125: {cat: 0xf, setID: 0x6}, + 126: {cat: 0x1f, setID: 0x7}, + 127: {cat: 0x62, setID: 0x3}, + 128: {cat: 0xf, setID: 0xe}, + 129: {cat: 0x1f, setID: 0xf}, + 130: {cat: 0x64, setID: 0x3}, + 131: {cat: 0xf, setID: 0xa}, + 132: {cat: 0x65, setID: 0x3}, + 133: {cat: 0xf, setID: 0x17}, + 134: {cat: 0x65, setID: 0x3}, + 135: {cat: 0xf, setID: 0x18}, + 136: {cat: 0x65, setID: 0x3}, + 137: {cat: 0x2f, setID: 0x6}, + 138: {cat: 0x3a, setID: 0x1a}, + 139: {cat: 0x2f, setID: 0x1b}, + 140: {cat: 0x3b, setID: 0x1c}, + 141: {cat: 0x2f, setID: 0x1d}, + 142: {cat: 0x3c, setID: 0x1e}, + 143: {cat: 0x37, setID: 0x3}, + 144: {cat: 0xa5, setID: 0x0}, + 145: {cat: 0x22, setID: 0x1}, + 146: {cat: 0x23, setID: 0x2}, + 147: {cat: 0x24, setID: 0x1f}, + 148: {cat: 0x25, setID: 0x20}, + 149: {cat: 0xf, setID: 0x6}, + 150: {cat: 0x62, setID: 0x3}, + 151: {cat: 0xf, setID: 0x1b}, + 152: {cat: 0x63, setID: 0x3}, + 153: {cat: 0xf, setID: 0x21}, + 154: {cat: 0x64, setID: 0x3}, + 155: {cat: 0x75, setID: 0x3}, + 156: {cat: 0x21, setID: 0x3}, + 157: {cat: 0x22, setID: 0x1}, + 158: {cat: 0x23, setID: 0x2}, + 159: {cat: 0x2c, setID: 0x22}, + 160: {cat: 0x2d, setID: 0x5}, + 161: {cat: 0x21, setID: 0x3}, + 162: {cat: 0x22, setID: 0x1}, + 163: {cat: 0x23, setID: 0x2}, + 164: {cat: 0x24, setID: 0x23}, + 165: {cat: 0x25, setID: 0x24}, +} // Size: 356 bytes + +var cardinalIndex = []uint8{ // 36 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a, + 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b, + 0x2d, 0x2f, 0x32, 0x38, 0x42, 0x45, 0x4c, 0x55, + 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91, + 0x95, 0x9c, 0xa1, 0xa6, +} // Size: 60 bytes + +var cardinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06, + 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00, + // Entry 40 - 7F + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e, + 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + // Entry 100 - 13F + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x08, 0x08, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c, + 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f, + 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, + 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01, + 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02, + 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10, + // Entry 1C0 - 1FF + 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, + 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08, + 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08, + 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d, + 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12, + 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d, + 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10, + // Entry 280 - 2BF + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06, + 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08, + // Entry 2C0 - 2FF + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07, + 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, + 0x08, 0x08, 
0x08, 0x06, 0x08, 0x08, 0x00, 0x00, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04, +} // Size: 799 bytes + +var cardinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101, + 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001, + 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1, + 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821, + 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + // Entry 20 - 3F + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + // Entry 40 - 5F + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + // Entry 60 - 7F + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, +} // Size: 824 bytes + +// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets + +// Total table size 3860 bytes (3KiB); checksum: AAFBF21 diff --git a/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/vendor/golang.org/x/text/internal/catmsg/catmsg.go new file mode 100644 index 000000000..1b257a7b4 --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/catmsg.go @@ -0,0 +1,417 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catmsg contains support types for package x/text/message/catalog. +// +// This package contains the low-level implementations of Message used by the +// catalog package and provides primitives for other packages to implement their +// own. For instance, the plural package provides functionality for selecting +// translation strings based on the plural category of substitution arguments. +// +// # Encoding and Decoding +// +// Catalogs store Messages encoded as a single string. 
Compiling a message into +// a string both results in compacter representation and speeds up evaluation. +// +// A Message must implement a Compile method to convert its arbitrary +// representation to a string. The Compile method takes an Encoder which +// facilitates serializing the message. Encoders also provide more context of +// the messages's creation (such as for which language the message is intended), +// which may not be known at the time of the creation of the message. +// +// Each message type must also have an accompanying decoder registered to decode +// the message. This decoder takes a Decoder argument which provides the +// counterparts for the decoding. +// +// # Renderers +// +// A Decoder must be initialized with a Renderer implementation. These +// implementations must be provided by packages that use Catalogs, typically +// formatting packages such as x/text/message. A typical user will not need to +// worry about this type; it is only relevant to packages that do string +// formatting and want to use the catalog package to handle localized strings. +// +// A package that uses catalogs for selecting strings receives selection results +// as sequence of substrings passed to the Renderer. The following snippet shows +// how to express the above example using the message package. +// +// message.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Select(1, "one", "minute")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late. +// +// To evaluate the Printf, package message wraps the arguments in a Renderer +// that is passed to the catalog for message decoding. The call sequence that +// results from evaluating the above message, assuming the person is rather +// tardy, is: +// +// Render("You are %[1]d ") +// Arg(1) +// Render("minutes") +// Render(" late.") +// +// The calls to Arg is caused by the plural.Select execution, which evaluates +// the argument to determine whether the singular or plural message form should +// be selected. The calls to Render reports the partial results to the message +// package for further evaluation. +package catmsg + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "golang.org/x/text/language" +) + +// A Handle refers to a registered message type. +type Handle int + +// A Handler decodes and evaluates data compiled by a Message and sends the +// result to the Decoder. The output may depend on the value of the substitution +// arguments, accessible by the Decoder's Arg method. The Handler returns false +// if there is no translation for the given substitution arguments. +type Handler func(d *Decoder) bool + +// Register records the existence of a message type and returns a Handle that +// can be used in the Encoder's EncodeMessageType method to create such +// messages. The prefix of the name should be the package path followed by +// an optional disambiguating string. +// Register will panic if a handle for the same name was already registered. +func Register(name string, handler Handler) Handle { + mutex.Lock() + defer mutex.Unlock() + + if _, ok := names[name]; ok { + panic(fmt.Errorf("catmsg: handler for %q already exists", name)) + } + h := Handle(len(handlers)) + names[name] = h + handlers = append(handlers, handler) + return h +} + +// These handlers require fixed positions in the handlers slice. 
+const ( + msgVars Handle = iota + msgFirst + msgRaw + msgString + msgAffix + // Leave some arbitrary room for future expansion: 20 should suffice. + numInternal = 20 +) + +const prefix = "golang.org/x/text/internal/catmsg." + +var ( + // TODO: find a more stable way to link handles to message types. + mutex sync.Mutex + names = map[string]Handle{ + prefix + "Vars": msgVars, + prefix + "First": msgFirst, + prefix + "Raw": msgRaw, + prefix + "String": msgString, + prefix + "Affix": msgAffix, + } + handlers = make([]Handler, numInternal) +) + +func init() { + // This handler is a message type wrapper that initializes a decoder + // with a variable block. This message type, if present, is always at the + // start of an encoded message. + handlers[msgVars] = func(d *Decoder) bool { + blockSize := int(d.DecodeUint()) + d.vars = d.data[:blockSize] + d.data = d.data[blockSize:] + return d.executeMessage() + } + + // First takes the first message in a sequence that results in a match for + // the given substitution arguments. + handlers[msgFirst] = func(d *Decoder) bool { + for !d.Done() { + if d.ExecuteMessage() { + return true + } + } + return false + } + + handlers[msgRaw] = func(d *Decoder) bool { + d.Render(d.data) + return true + } + + // A String message alternates between a string constant and a variable + // substitution. + handlers[msgString] = func(d *Decoder) bool { + for !d.Done() { + if str := d.DecodeString(); str != "" { + d.Render(str) + } + if d.Done() { + break + } + d.ExecuteSubstitution() + } + return true + } + + handlers[msgAffix] = func(d *Decoder) bool { + // TODO: use an alternative method for common cases. + prefix := d.DecodeString() + suffix := d.DecodeString() + if prefix != "" { + d.Render(prefix) + } + ret := d.ExecuteMessage() + if suffix != "" { + d.Render(suffix) + } + return ret + } +} + +var ( + // ErrIncomplete indicates a compiled message does not define translations + // for all possible argument values. If this message is returned, evaluating + // a message may result in the ErrNoMatch error. + ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs") + + // ErrNoMatch indicates no translation message matched the given input + // parameters when evaluating a message. + ErrNoMatch = errors.New("catmsg: no translation for inputs") +) + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message interface { + // Compile encodes the format string(s) of the message as a string for later + // evaluation. + // + // The first call Compile makes on the encoder must be EncodeMessageType. + // The handle passed to this call may either be a handle returned by + // Register to encode a single custom message, or HandleFirst followed by + // a sequence of calls to EncodeMessage. + // + // Compile must return ErrIncomplete if it is possible for evaluation to + // not match any translation for a given set of formatting parameters. + // For example, selecting a translation based on plural form may not yield + // a match if the form "Other" is not one of the selectors. + // + // Compile may return any other application-specific error. For backwards + // compatibility with package like fmt, which often do not do sanity + // checking of format strings ahead of time, Compile should still make an + // effort to have some sensible fallback in case of an error. 
+ Compile(e *Encoder) error +} + +// Compile converts a Message to a data string that can be stored in a Catalog. +// The resulting string can subsequently be decoded by passing to the Execute +// method of a Decoder. +func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) { + // TODO: pass macros so they can be used for validation. + v := &Encoder{inBody: true} // encoder for variables + v.root = v + e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages + err = m.Compile(e) + // This package serves te message package, which in turn is meant to be a + // drop-in replacement for fmt. With the fmt package, format strings are + // evaluated lazily and errors are handled by substituting strings in the + // result, rather then returning an error. Dealing with multiple languages + // makes it more important to check errors ahead of time. We chose to be + // consistent and compatible and allow graceful degradation in case of + // errors. + buf := e.buf[stripPrefix(e.buf):] + if len(v.buf) > 0 { + // Prepend variable block. + b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf)) + b[0] = byte(msgVars) + b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))] + b = append(b, v.buf...) + b = append(b, buf...) + buf = b + } + if err == nil { + err = v.err + } + return string(buf), err +} + +// FirstOf is a message type that prints the first message in the sequence that +// resolves to a match for the given substitution arguments. +type FirstOf []Message + +// Compile implements Message. +func (s FirstOf) Compile(e *Encoder) error { + e.EncodeMessageType(msgFirst) + err := ErrIncomplete + for i, m := range s { + if err == nil { + return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1) + } + err = e.EncodeMessage(m) + } + return err +} + +// Var defines a message that can be substituted for a placeholder of the same +// name. If an expression does not result in a string after evaluation, Name is +// used as the substitution. For example: +// +// Var{ +// Name: "minutes", +// Message: plural.Select(1, "one", "minute"), +// } +// +// will resolve to minute for singular and minutes for plural forms. +type Var struct { + Name string + Message Message +} + +var errIsVar = errors.New("catmsg: variable used as message") + +// Compile implements Message. +// +// Note that this method merely registers a variable; it does not create an +// encoded message. +func (v *Var) Compile(e *Encoder) error { + if err := e.addVar(v.Name, v.Message); err != nil { + return err + } + // Using a Var by itself is an error. If it is in a sequence followed by + // other messages referring to it, this error will be ignored. + return errIsVar +} + +// Raw is a message consisting of a single format string that is passed as is +// to the Renderer. +// +// Note that a Renderer may still do its own variable substitution. +type Raw string + +// Compile implements Message. +func (r Raw) Compile(e *Encoder) (err error) { + e.EncodeMessageType(msgRaw) + // Special case: raw strings don't have a size encoding and so don't use + // EncodeString. + e.buf = append(e.buf, r...) + return nil +} + +// String is a message consisting of a single format string which contains +// placeholders that may be substituted with variables. +// +// Variable substitutions are marked with placeholders and a variable name of +// the form ${name}. Any other substitutions such as Go templates or +// printf-style substitutions are left to be done by the Renderer. 
+// +// When evaluation a string interpolation, a Renderer will receive separate +// calls for each placeholder and interstitial string. For example, for the +// message: "%[1]v ${invites} %[2]v to ${their} party." The sequence of calls +// is: +// +// d.Render("%[1]v ") +// d.Arg(1) +// d.Render(resultOfInvites) +// d.Render(" %[2]v to ") +// d.Arg(2) +// d.Render(resultOfTheir) +// d.Render(" party.") +// +// where the messages for "invites" and "their" both use a plural.Select +// referring to the first argument. +// +// Strings may also invoke macros. Macros are essentially variables that can be +// reused. Macros may, for instance, be used to make selections between +// different conjugations of a verb. See the catalog package description for an +// overview of macros. +type String string + +// Compile implements Message. It parses the placeholder formats and returns +// any error. +func (s String) Compile(e *Encoder) (err error) { + msg := string(s) + const subStart = "${" + hasHeader := false + p := 0 + b := []byte{} + for { + i := strings.Index(msg[p:], subStart) + if i == -1 { + break + } + b = append(b, msg[p:p+i]...) + p += i + len(subStart) + if i = strings.IndexByte(msg[p:], '}'); i == -1 { + b = append(b, "$!(MISSINGBRACE)"...) + err = fmt.Errorf("catmsg: missing '}'") + p = len(msg) + break + } + name := strings.TrimSpace(msg[p : p+i]) + if q := strings.IndexByte(name, '('); q == -1 { + if !hasHeader { + hasHeader = true + e.EncodeMessageType(msgString) + } + e.EncodeString(string(b)) + e.EncodeSubstitution(name) + b = b[:0] + } else if j := strings.IndexByte(name[q:], ')'); j == -1 { + // TODO: what should the error be? + b = append(b, "$!(MISSINGPAREN)"...) + err = fmt.Errorf("catmsg: missing ')'") + } else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil { + // TODO: handle more than one argument + b = append(b, "$!(BADNUM)"...) + err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j])) + } else { + if !hasHeader { + hasHeader = true + e.EncodeMessageType(msgString) + } + e.EncodeString(string(b)) + e.EncodeSubstitution(name[:q], int(x)) + b = b[:0] + } + p += i + 1 + } + b = append(b, msg[p:]...) + if !hasHeader { + // Simplify string to a raw string. + Raw(string(b)).Compile(e) + } else if len(b) > 0 { + e.EncodeString(string(b)) + } + return err +} + +// Affix is a message that adds a prefix and suffix to another message. +// This is mostly used add back whitespace to a translation that was stripped +// before sending it out. +type Affix struct { + Message Message + Prefix string + Suffix string +} + +// Compile implements Message. +func (a Affix) Compile(e *Encoder) (err error) { + // TODO: consider adding a special message type that just adds a single + // return. This is probably common enough to handle the majority of cases. + // Get some stats first, though. + e.EncodeMessageType(msgAffix) + e.EncodeString(a.Prefix) + e.EncodeString(a.Suffix) + e.EncodeMessage(a.Message) + return nil +} diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go new file mode 100644 index 000000000..547802b0f --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/codec.go @@ -0,0 +1,407 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package catmsg + +import ( + "errors" + "fmt" + + "golang.org/x/text/language" +) + +// A Renderer renders a Message. +type Renderer interface { + // Render renders the given string. The given string may be interpreted as a + // format string, such as the one used by the fmt package or a template. + Render(s string) + + // Arg returns the i-th argument passed to format a message. This method + // should return nil if there is no such argument. Messages need access to + // arguments to allow selecting a message based on linguistic features of + // those arguments. + Arg(i int) interface{} +} + +// A Dictionary specifies a source of messages, including variables or macros. +type Dictionary interface { + // Lookup returns the message for the given key. It returns false for ok if + // such a message could not be found. + Lookup(key string) (data string, ok bool) + + // TODO: consider returning an interface, instead of a string. This will + // allow implementations to do their own message type decoding. +} + +// An Encoder serializes a Message to a string. +type Encoder struct { + // The root encoder is used for storing encoded variables. + root *Encoder + // The parent encoder provides the surrounding scopes for resolving variable + // names. + parent *Encoder + + tag language.Tag + + // buf holds the encoded message so far. After a message completes encoding, + // the contents of buf, prefixed by the encoded length, are flushed to the + // parent buffer. + buf []byte + + // vars is the lookup table of variables in the current scope. + vars []keyVal + + err error + inBody bool // if false next call must be EncodeMessageType +} + +type keyVal struct { + key string + offset int +} + +// Language reports the language for which the encoded message will be stored +// in the Catalog. +func (e *Encoder) Language() language.Tag { return e.tag } + +func (e *Encoder) setError(err error) { + if e.root.err == nil { + e.root.err = err + } +} + +// EncodeUint encodes x. +func (e *Encoder) EncodeUint(x uint64) { + e.checkInBody() + var buf [maxVarintBytes]byte + n := encodeUint(buf[:], x) + e.buf = append(e.buf, buf[:n]...) +} + +// EncodeString encodes s. +func (e *Encoder) EncodeString(s string) { + e.checkInBody() + e.EncodeUint(uint64(len(s))) + e.buf = append(e.buf, s...) +} + +// EncodeMessageType marks the current message to be of type h. +// +// It must be the first call of a Message's Compile method. +func (e *Encoder) EncodeMessageType(h Handle) { + if e.inBody { + panic("catmsg: EncodeMessageType not the first method called") + } + e.inBody = true + e.EncodeUint(uint64(h)) +} + +// EncodeMessage serializes the given message inline at the current position. +func (e *Encoder) EncodeMessage(m Message) error { + e = &Encoder{root: e.root, parent: e, tag: e.tag} + err := m.Compile(e) + if _, ok := m.(*Var); !ok { + e.flushTo(e.parent) + } + return err +} + +func (e *Encoder) checkInBody() { + if !e.inBody { + panic("catmsg: expected prior call to EncodeMessageType") + } +} + +// stripPrefix indicates the number of prefix bytes that must be stripped to +// turn a single-element sequence into a message that is just this single member +// without its size prefix. If the message can be stripped, b[1:n] contains the +// size prefix. 
+func stripPrefix(b []byte) (n int) { + if len(b) > 0 && Handle(b[0]) == msgFirst { + x, n, _ := decodeUint(b[1:]) + if 1+n+int(x) == len(b) { + return 1 + n + } + } + return 0 +} + +func (e *Encoder) flushTo(dst *Encoder) { + data := e.buf + p := stripPrefix(data) + if p > 0 { + data = data[1:] + } else { + // Prefix the size. + dst.EncodeUint(uint64(len(data))) + } + dst.buf = append(dst.buf, data...) +} + +func (e *Encoder) addVar(key string, m Message) error { + for _, v := range e.parent.vars { + if v.key == key { + err := fmt.Errorf("catmsg: duplicate variable %q", key) + e.setError(err) + return err + } + } + scope := e.parent + // If a variable message is Incomplete, and does not evaluate to a message + // during execution, we fall back to the variable name. We encode this by + // appending the variable name if the message reports it's incomplete. + + err := m.Compile(e) + if err != ErrIncomplete { + e.setError(err) + } + switch { + case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence + e.buf = e.buf[:0] + e.inBody = false + fallthrough + case len(e.buf) == 0: + // Empty message. + if err := String(key).Compile(e); err != nil { + e.setError(err) + } + case err == ErrIncomplete: + if Handle(e.buf[0]) != msgFirst { + seq := &Encoder{root: e.root, parent: e} + seq.EncodeMessageType(msgFirst) + e.flushTo(seq) + e = seq + } + // e contains a sequence; append the fallback string. + e.EncodeMessage(String(key)) + } + + // Flush result to variable heap. + offset := len(e.root.buf) + e.flushTo(e.root) + e.buf = e.buf[:0] + + // Record variable offset in current scope. + scope.vars = append(scope.vars, keyVal{key: key, offset: offset}) + return err +} + +const ( + substituteVar = iota + substituteMacro + substituteError +) + +// EncodeSubstitution inserts a resolved reference to a variable or macro. +// +// This call must be matched with a call to ExecuteSubstitution at decoding +// time. +func (e *Encoder) EncodeSubstitution(name string, arguments ...int) { + if arity := len(arguments); arity > 0 { + // TODO: also resolve macros. + e.EncodeUint(substituteMacro) + e.EncodeString(name) + for _, a := range arguments { + e.EncodeUint(uint64(a)) + } + return + } + for scope := e; scope != nil; scope = scope.parent { + for _, v := range scope.vars { + if v.key != name { + continue + } + e.EncodeUint(substituteVar) // TODO: support arity > 0 + e.EncodeUint(uint64(v.offset)) + return + } + } + // TODO: refer to dictionary-wide scoped variables. + e.EncodeUint(substituteError) + e.EncodeString(name) + e.setError(fmt.Errorf("catmsg: unknown var %q", name)) +} + +// A Decoder deserializes and evaluates messages that are encoded by an encoder. +type Decoder struct { + tag language.Tag + dst Renderer + macros Dictionary + + err error + vars string + data string + + macroArg int // TODO: allow more than one argument +} + +// NewDecoder returns a new Decoder. +// +// Decoders are designed to be reused for multiple invocations of Execute. +// Only one goroutine may call Execute concurrently. +func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder { + return &Decoder{ + tag: tag, + dst: r, + macros: macros, + } +} + +func (d *Decoder) setError(err error) { + if d.err == nil { + d.err = err + } +} + +// Language returns the language in which the message is being rendered. +// +// The destination language may be a child language of the language used for +// encoding. For instance, a decoding language of "pt-PT" is consistent with an +// encoding language of "pt". 
+func (d *Decoder) Language() language.Tag { return d.tag } + +// Done reports whether there are more bytes to process in this message. +func (d *Decoder) Done() bool { return len(d.data) == 0 } + +// Render implements Renderer. +func (d *Decoder) Render(s string) { d.dst.Render(s) } + +// Arg implements Renderer. +// +// During evaluation of macros, the argument positions may be mapped to +// arguments that differ from the original call. +func (d *Decoder) Arg(i int) interface{} { + if d.macroArg != 0 { + if i != 1 { + panic("catmsg: only macros with single argument supported") + } + i = d.macroArg + } + return d.dst.Arg(i) +} + +// DecodeUint decodes a number that was encoded with EncodeUint and advances the +// position. +func (d *Decoder) DecodeUint() uint64 { + x, n, err := decodeUintString(d.data) + d.data = d.data[n:] + if err != nil { + d.setError(err) + } + return x +} + +// DecodeString decodes a string that was encoded with EncodeString and advances +// the position. +func (d *Decoder) DecodeString() string { + size := d.DecodeUint() + s := d.data[:size] + d.data = d.data[size:] + return s +} + +// SkipMessage skips the message at the current location and advances the +// position. +func (d *Decoder) SkipMessage() { + n := int(d.DecodeUint()) + d.data = d.data[n:] +} + +// Execute decodes and evaluates msg. +// +// Only one goroutine may call execute. +func (d *Decoder) Execute(msg string) error { + d.err = nil + if !d.execute(msg) { + return ErrNoMatch + } + return d.err +} + +func (d *Decoder) execute(msg string) bool { + saved := d.data + d.data = msg + ok := d.executeMessage() + d.data = saved + return ok +} + +// executeMessageFromData is like execute, but also decodes a leading message +// size and clips the given string accordingly. +// +// It reports the number of bytes consumed and whether a message was selected. +func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) { + saved := d.data + d.data = s + size := int(d.DecodeUint()) + n = len(s) - len(d.data) + // Sanitize the setting. This allows skipping a size argument for + // RawString and method Done. + d.data = d.data[:size] + ok = d.executeMessage() + n += size - len(d.data) + d.data = saved + return n, ok +} + +var errUnknownHandler = errors.New("catmsg: string contains unsupported handler") + +// executeMessage reads the handle id, initializes the decoder and executes the +// message. It is assumed that all of d.data[d.p:] is the single message. +func (d *Decoder) executeMessage() bool { + if d.Done() { + // We interpret no data as a valid empty message. + return true + } + handle := d.DecodeUint() + + var fn Handler + mutex.Lock() + if int(handle) < len(handlers) { + fn = handlers[handle] + } + mutex.Unlock() + if fn == nil { + d.setError(errUnknownHandler) + d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle)) + return true + } + return fn(d) +} + +// ExecuteMessage decodes and executes the message at the current position. +func (d *Decoder) ExecuteMessage() bool { + n, ok := d.executeMessageFromData(d.data) + d.data = d.data[n:] + return ok +} + +// ExecuteSubstitution executes the message corresponding to the substitution +// as encoded by EncodeSubstitution. +func (d *Decoder) ExecuteSubstitution() { + switch x := d.DecodeUint(); x { + case substituteVar: + offset := d.DecodeUint() + d.executeMessageFromData(d.vars[offset:]) + case substituteMacro: + name := d.DecodeString() + data, ok := d.macros.Lookup(name) + old := d.macroArg + // TODO: support macros of arity other than 1. 
+ d.macroArg = int(d.DecodeUint()) + switch { + case !ok: + // TODO: detect this at creation time. + d.setError(fmt.Errorf("catmsg: undefined macro %q", name)) + fallthrough + case !d.execute(data): + d.dst.Render(name) // fall back to macro name. + } + d.macroArg = old + case substituteError: + d.dst.Render(d.DecodeString()) + default: + panic("catmsg: unreachable") + } +} diff --git a/vendor/golang.org/x/text/internal/catmsg/varint.go b/vendor/golang.org/x/text/internal/catmsg/varint.go new file mode 100644 index 000000000..a2cee2cf5 --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/varint.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +// This file implements varint encoding analogous to the one in encoding/binary. +// We need a string version of this function, so we add that here and then add +// the rest for consistency. + +import "errors" + +var ( + errIllegalVarint = errors.New("catmsg: illegal varint") + errVarintTooLarge = errors.New("catmsg: varint too large for uint64") +) + +const maxVarintBytes = 10 // maximum length of a varint + +// encodeUint encodes x as a variable-sized integer into buf and returns the +// number of bytes written. buf must be at least maxVarintBytes long +func encodeUint(buf []byte, x uint64) (n int) { + for ; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return n +} + +func decodeUintString(s string) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(s) { + return 0, i, errIllegalVarint + } + b := uint64(s[i]) + i++ + x |= (b & 0x7F) << shift + if b&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} + +func decodeUint(b []byte) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(b) { + return 0, i, errIllegalVarint + } + c := uint64(b[i]) + i++ + x |= (c & 0x7F) << shift + if c&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} diff --git a/vendor/golang.org/x/text/internal/format/format.go b/vendor/golang.org/x/text/internal/format/format.go new file mode 100644 index 000000000..ee1c57a3c --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/format.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format contains types for defining language-specific formatting of +// values. +// +// This package is internal now, but will eventually be exposed after the API +// settles. +package format // import "golang.org/x/text/internal/format" + +import ( + "fmt" + + "golang.org/x/text/language" +) + +// State represents the printer state passed to custom formatters. It provides +// access to the fmt.State interface and the sentence and language-related +// context. +type State interface { + fmt.State + + // Language reports the requested language in which to render a message. + Language() language.Tag + + // TODO: consider this and removing rune from the Format method in the + // Formatter interface. + // + // Verb returns the format variant to render, analogous to the types used + // in fmt. Use 'v' for the default or only variant. + // Verb() rune + + // TODO: more info: + // - sentence context such as linguistic features passed by the translator. 
+} + +// Formatter is analogous to fmt.Formatter. +type Formatter interface { + Format(state State, verb rune) +} diff --git a/vendor/golang.org/x/text/internal/format/parser.go b/vendor/golang.org/x/text/internal/format/parser.go new file mode 100644 index 000000000..855aed71d --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/parser.go @@ -0,0 +1,358 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package format + +import ( + "reflect" + "unicode/utf8" +) + +// A Parser parses a format string. The result from the parse are set in the +// struct fields. +type Parser struct { + Verb rune + + WidthPresent bool + PrecPresent bool + Minus bool + Plus bool + Sharp bool + Space bool + Zero bool + + // For the formats %+v %#v, we set the plusV/sharpV flags + // and clear the plus/sharp flags since %+v and %#v are in effect + // different, flagless formats set at the top level. + PlusV bool + SharpV bool + + HasIndex bool + + Width int + Prec int // precision + + // retain arguments across calls. + Args []interface{} + // retain current argument number across calls + ArgNum int + + // reordered records whether the format string used argument reordering. + Reordered bool + // goodArgNum records whether the most recent reordering directive was valid. + goodArgNum bool + + // position info + format string + startPos int + endPos int + Status Status +} + +// Reset initializes a parser to scan format strings for the given args. +func (p *Parser) Reset(args []interface{}) { + p.Args = args + p.ArgNum = 0 + p.startPos = 0 + p.Reordered = false +} + +// Text returns the part of the format string that was parsed by the last call +// to Scan. It returns the original substitution clause if the current scan +// parsed a substitution. +func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] } + +// SetFormat sets a new format string to parse. It does not reset the argument +// count. +func (p *Parser) SetFormat(format string) { + p.format = format + p.startPos = 0 + p.endPos = 0 +} + +// Status indicates the result type of a call to Scan. +type Status int + +const ( + StatusText Status = iota + StatusSubstitution + StatusBadWidthSubstitution + StatusBadPrecSubstitution + StatusNoVerb + StatusBadArgNum + StatusMissingArg +) + +// ClearFlags reset the parser to default behavior. +func (p *Parser) ClearFlags() { + p.WidthPresent = false + p.PrecPresent = false + p.Minus = false + p.Plus = false + p.Sharp = false + p.Space = false + p.Zero = false + + p.PlusV = false + p.SharpV = false + + p.HasIndex = false +} + +// Scan scans the next part of the format string and sets the status to +// indicate whether it scanned a string literal, substitution or error. +func (p *Parser) Scan() bool { + p.Status = StatusText + format := p.format + end := len(format) + if p.endPos >= end { + return false + } + afterIndex := false // previous item in format was an index like [3]. + + p.startPos = p.endPos + p.goodArgNum = true + i := p.startPos + for i < end && format[i] != '%' { + i++ + } + if i > p.startPos { + p.endPos = i + return true + } + // Process one verb + i++ + + p.Status = StatusSubstitution + + // Do we have flags? + p.ClearFlags() + +simpleFormat: + for ; i < end; i++ { + c := p.format[i] + switch c { + case '#': + p.Sharp = true + case '0': + p.Zero = !p.Minus // Only allow zero padding to the left. 
+ case '+': + p.Plus = true + case '-': + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + case ' ': + p.Space = true + default: + // Fast path for common case of ascii lower case simple verbs + // without precision or width or argument indices. + if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) { + if c == 'v' { + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + } + p.Verb = rune(c) + p.ArgNum++ + p.endPos = i + 1 + return true + } + // Format is more complex than simple flags and a verb or is malformed. + break simpleFormat + } + } + + // Do we have an explicit argument index? + i, afterIndex = p.updateArgNumber(format, i) + + // Do we have width? + if i < end && format[i] == '*' { + i++ + p.Width, p.WidthPresent = p.intFromArg() + + if !p.WidthPresent { + p.Status = StatusBadWidthSubstitution + } + + // We have a negative width, so take its value and ensure + // that the minus flag is set + if p.Width < 0 { + p.Width = -p.Width + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + } + afterIndex = false + } else { + p.Width, p.WidthPresent, i = parsenum(format, i, end) + if afterIndex && p.WidthPresent { // "%[3]2d" + p.goodArgNum = false + } + } + + // Do we have precision? + if i+1 < end && format[i] == '.' { + i++ + if afterIndex { // "%[3].2d" + p.goodArgNum = false + } + i, afterIndex = p.updateArgNumber(format, i) + if i < end && format[i] == '*' { + i++ + p.Prec, p.PrecPresent = p.intFromArg() + // Negative precision arguments don't make sense + if p.Prec < 0 { + p.Prec = 0 + p.PrecPresent = false + } + if !p.PrecPresent { + p.Status = StatusBadPrecSubstitution + } + afterIndex = false + } else { + p.Prec, p.PrecPresent, i = parsenum(format, i, end) + if !p.PrecPresent { + p.Prec = 0 + p.PrecPresent = true + } + } + } + + if !afterIndex { + i, afterIndex = p.updateArgNumber(format, i) + } + p.HasIndex = afterIndex + + if i >= end { + p.endPos = i + p.Status = StatusNoVerb + return true + } + + verb, w := utf8.DecodeRuneInString(format[i:]) + p.endPos = i + w + p.Verb = verb + + switch { + case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec. + p.startPos = p.endPos - 1 + p.Status = StatusText + case !p.goodArgNum: + p.Status = StatusBadArgNum + case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb. + p.Status = StatusMissingArg + p.ArgNum++ + case verb == 'v': + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + fallthrough + default: + p.ArgNum++ + } + return true +} + +// intFromArg gets the ArgNumth element of Args. On return, isInt reports +// whether the argument has integer type. +func (p *Parser) intFromArg() (num int, isInt bool) { + if p.ArgNum < len(p.Args) { + arg := p.Args[p.ArgNum] + num, isInt = arg.(int) // Almost always OK. + if !isInt { + // Work harder. + switch v := reflect.ValueOf(arg); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := v.Int() + if int64(int(n)) == n { + num = int(n) + isInt = true + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := v.Uint() + if int64(n) >= 0 && uint64(int(n)) == n { + num = int(n) + isInt = true + } + default: + // Already 0, false. 
+ } + } + p.ArgNum++ + if tooLarge(num) { + num = 0 + isInt = false + } + } + return +} + +// parseArgNumber returns the value of the bracketed number, minus 1 +// (explicit argument numbers are one-indexed but we want zero-indexed). +// The opening bracket is known to be present at format[0]. +// The returned values are the index, the number of bytes to consume +// up to the closing paren, if present, and whether the number parsed +// ok. The bytes to consume will be 1 if no closing paren is present. +func parseArgNumber(format string) (index int, wid int, ok bool) { + // There must be at least 3 bytes: [n]. + if len(format) < 3 { + return 0, 1, false + } + + // Find closing bracket. + for i := 1; i < len(format); i++ { + if format[i] == ']' { + width, ok, newi := parsenum(format, 1, i) + if !ok || newi != i { + return 0, i + 1, false + } + return width - 1, i + 1, true // arg numbers are one-indexed and skip paren. + } + } + return 0, 1, false +} + +// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in +// argNum or the value of the bracketed integer that begins format[i:]. It also returns +// the new value of i, that is, the index of the next byte of the format to process. +func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) { + if len(format) <= i || format[i] != '[' { + return i, false + } + p.Reordered = true + index, wid, ok := parseArgNumber(format[i:]) + if ok && 0 <= index && index < len(p.Args) { + p.ArgNum = index + return i + wid, true + } + p.goodArgNum = false + return i + wid, ok +} + +// tooLarge reports whether the magnitude of the integer is +// too large to be used as a formatting width or precision. +func tooLarge(x int) bool { + const max int = 1e6 + return x > max || x < -max +} + +// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present. +func parsenum(s string, start, end int) (num int, isnum bool, newi int) { + if start >= end { + return 0, false, end + } + for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ { + if tooLarge(num) { + return 0, false, end // Overflow; crazy long number most likely. + } + num = num*10 + int(s[newi]-'0') + isnum = true + } + return +} diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go new file mode 100644 index 000000000..3cddbbdda --- /dev/null +++ b/vendor/golang.org/x/text/internal/internal.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains non-exported functionality that are used by +// packages in the text repository. +package internal // import "golang.org/x/text/internal" + +import ( + "sort" + + "golang.org/x/text/language" +) + +// SortTags sorts tags in place. +func SortTags(tags []language.Tag) { + sort.Sort(sorter(tags)) +} + +type sorter []language.Tag + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sorter) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +// UniqueTags sorts and filters duplicate tags in place and returns a slice with +// only unique tags. 
+func UniqueTags(tags []language.Tag) []language.Tag { + if len(tags) <= 1 { + return tags + } + SortTags(tags) + k := 0 + for i := 1; i < len(tags); i++ { + if tags[k].String() < tags[i].String() { + k++ + tags[k] = tags[i] + } + } + return tags[:k+1] +} diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go new file mode 100644 index 000000000..1cc004a6d --- /dev/null +++ b/vendor/golang.org/x/text/internal/match.go @@ -0,0 +1,67 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// This file contains matchers that implement CLDR inheritance. +// +// See https://unicode.org/reports/tr35/#Locale_Inheritance. +// +// Some of the inheritance described in this document is already handled by +// the cldr package. + +import ( + "golang.org/x/text/language" +) + +// TODO: consider if (some of the) matching algorithm needs to be public after +// getting some feel about what is generic and what is specific. + +// NewInheritanceMatcher returns a matcher that matches based on the inheritance +// chain. +// +// The matcher uses canonicalization and the parent relationship to find a +// match. The resulting match will always be either Und or a language with the +// same language and script as the requested language. It will not match +// languages for which there is understood to be mutual or one-directional +// intelligibility. +// +// A Match will indicate an Exact match if the language matches after +// canonicalization and High if the matched tag is a parent. +func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { + tags := &InheritanceMatcher{make(map[language.Tag]int)} + for i, tag := range t { + ct, err := language.All.Canonicalize(tag) + if err != nil { + ct = tag + } + tags.index[ct] = i + } + return tags +} + +type InheritanceMatcher struct { + index map[language.Tag]int +} + +func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { + for _, t := range want { + ct, err := language.All.Canonicalize(t) + if err != nil { + ct = t + } + conf := language.Exact + for { + if index, ok := m.index[ct]; ok { + return ct, index, conf + } + if ct == language.Und { + break + } + ct = ct.Parent() + conf = language.High + } + } + return language.Und, 0, language.No +} diff --git a/vendor/golang.org/x/text/internal/number/common.go b/vendor/golang.org/x/text/internal/number/common.go new file mode 100644 index 000000000..a6e9c8e0d --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/common.go @@ -0,0 +1,55 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" +) + +// A system identifies a CLDR numbering system. +type system byte + +type systemData struct { + id system + digitSize byte // number of UTF-8 bytes per digit + zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit. +} + +// A SymbolType identifies a symbol of a specific kind. 
+type SymbolType int + +const ( + SymDecimal SymbolType = iota + SymGroup + SymList + SymPercentSign + SymPlusSign + SymMinusSign + SymExponential + SymSuperscriptingExponent + SymPerMille + SymInfinity + SymNan + SymTimeSeparator + + NumSymbolTypes +) + +const hasNonLatnMask = 0x8000 + +// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask +// is not 0 (with this bit masked out), and an offset into symIndex otherwise. +// +// TODO: this type can be a byte again if we use an indirection into altsymData +// and introduce an alt -> offset slice (the length of this will be number of +// alternatives plus 1). This also allows getting rid of the compactTag field +// in altSymData. In total this will save about 1K. +type symOffset uint16 + +type altSymData struct { + compactTag compact.ID + symIndex symOffset + system system +} diff --git a/vendor/golang.org/x/text/internal/number/decimal.go b/vendor/golang.org/x/text/internal/number/decimal.go new file mode 100644 index 000000000..e128cf343 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/decimal.go @@ -0,0 +1,500 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type RoundingMode + +package number + +import ( + "math" + "strconv" +) + +// RoundingMode determines how a number is rounded to the desired precision. +type RoundingMode byte + +const ( + ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant. + ToNearestZero // towards the nearest integer, or towards zero if equidistant. + ToNearestAway // towards the nearest integer, or away from zero if equidistant. + ToPositiveInf // towards infinity + ToNegativeInf // towards negative infinity + ToZero // towards zero + AwayFromZero // away from zero + numModes +) + +const maxIntDigits = 20 + +// A Decimal represents a floating point number in decimal format. +// Digits represents a number [0, 1.0), and the absolute value represented by +// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp +// may point outside a valid position in Digits. +// +// Examples: +// +// Number Decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 +// 12000 Digits: [1, 2], Exp: 5 +// 12000.00 Digits: [1, 2], Exp: 5 +// 0.00123 Digits: [1, 2, 3], Exp: -2 +// 0 Digits: [], Exp: 0 +type Decimal struct { + digits + + buf [maxIntDigits]byte +} + +type digits struct { + Digits []byte // mantissa digits, big-endian + Exp int32 // exponent + Neg bool + Inf bool // Takes precedence over Digits and Exp. + NaN bool // Takes precedence over Inf. +} + +// Digits represents a floating point number represented in digits of the +// base in which a number is to be displayed. It is similar to Decimal, but +// keeps track of trailing fraction zeros and the comma placement for +// engineering notation. Digits must have at least one digit. 
+// +// Examples: +// +// Number Decimal +// decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5 +// 12000 Digits: [1, 2], Exp: 5 End: 5 +// 12000.00 Digits: [1, 2], Exp: 5 End: 7 +// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3 +// 0 Digits: [], Exp: 0 End: 1 +// scientific (actual exp is Exp - Comma) +// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1 +// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0 +// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1 +// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1 +// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0 +// engineering +// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2 +type Digits struct { + digits + // End indicates the end position of the number. + End int32 // For decimals Exp <= End. For scientific len(Digits) <= End. + // Comma is used for the comma position for scientific (always 0 or 1) and + // engineering notation (always 0, 1, 2, or 3). + Comma uint8 + // IsScientific indicates whether this number is to be rendered as a + // scientific number. + IsScientific bool +} + +func (d *Digits) NumFracDigits() int { + if d.Exp >= d.End { + return 0 + } + return int(d.End - d.Exp) +} + +// normalize returns a new Decimal with leading and trailing zeros removed. +func (d *Decimal) normalize() (n Decimal) { + n = *d + b := n.Digits + // Strip leading zeros. Resulting number of digits is significant digits. + for len(b) > 0 && b[0] == 0 { + b = b[1:] + n.Exp-- + } + // Strip trailing zeros + for len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + if len(b) == 0 { + n.Exp = 0 + } + n.Digits = b + return n +} + +func (d *Decimal) clear() { + b := d.Digits + if b == nil { + b = d.buf[:0] + } + *d = Decimal{} + d.Digits = b[:0] +} + +func (x *Decimal) String() string { + if x.NaN { + return "NaN" + } + var buf []byte + if x.Neg { + buf = append(buf, '-') + } + if x.Inf { + buf = append(buf, "Inf"...) + return string(buf) + } + switch { + case len(x.Digits) == 0: + buf = append(buf, '0') + case x.Exp <= 0: + // 0.00ddd + buf = append(buf, "0."...) + buf = appendZeros(buf, -int(x.Exp)) + buf = appendDigits(buf, x.Digits) + + case /* 0 < */ int(x.Exp) < len(x.Digits): + // dd.ddd + buf = appendDigits(buf, x.Digits[:x.Exp]) + buf = append(buf, '.') + buf = appendDigits(buf, x.Digits[x.Exp:]) + + default: // len(x.Digits) <= x.Exp + // ddd00 + buf = appendDigits(buf, x.Digits) + buf = appendZeros(buf, int(x.Exp)-len(x.Digits)) + } + return string(buf) +} + +func appendDigits(buf []byte, digits []byte) []byte { + for _, c := range digits { + buf = append(buf, c+'0') + } + return buf +} + +// appendZeros appends n 0 digits to buf and returns buf. +func appendZeros(buf []byte, n int) []byte { + for ; n > 0; n-- { + buf = append(buf, '0') + } + return buf +} + +func (d *digits) round(mode RoundingMode, n int) { + if n >= len(d.Digits) { + return + } + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. 
+ inc := false + switch mode { + case ToNegativeInf: + inc = d.Neg + case ToPositiveInf: + inc = !d.Neg + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && + (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0) + case ToNearestAway: + inc = d.Digits[n] >= 5 + case ToNearestZero: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1 + default: + panic("unreachable") + } + if inc { + d.roundUp(n) + } else { + d.roundDown(n) + } +} + +// roundFloat rounds a floating point number. +func (r RoundingMode) roundFloat(x float64) float64 { + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. + abs := x + if x < 0 { + abs = -x + } + i, f := math.Modf(abs) + if f == 0.0 { + return x + } + inc := false + switch r { + case ToNegativeInf: + inc = x < 0 + case ToPositiveInf: + inc = x >= 0 + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + // TODO: check overflow + inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0 + case ToNearestAway: + inc = f >= 0.5 + case ToNearestZero: + inc = f > 0.5 + default: + panic("unreachable") + } + if inc { + i += 1 + } + if abs != x { + i = -i + } + return i +} + +func (x *digits) roundUp(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + // find first digit < 9 + for n > 0 && x.Digits[n-1] >= 9 { + n-- + } + + if n == 0 { + // all digits are 9s => round up to 1 and update exponent + x.Digits[0] = 1 // ok since len(x.Digits) > n + x.Digits = x.Digits[:1] + x.Exp++ + return + } + x.Digits[n-1]++ + x.Digits = x.Digits[:n] + // x already trimmed +} + +func (x *digits) roundDown(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + x.Digits = x.Digits[:n] + trim(x) +} + +// trim cuts off any trailing zeros from x's mantissa; +// they are meaningless for the value of x. +func trim(x *digits) { + i := len(x.Digits) + for i > 0 && x.Digits[i-1] == 0 { + i-- + } + x.Digits = x.Digits[:i] + if i == 0 { + x.Exp = 0 + } +} + +// A Converter converts a number into decimals according to the given rounding +// criteria. +type Converter interface { + Convert(d *Decimal, r RoundingContext) +} + +const ( + signed = true + unsigned = false +) + +// Convert converts the given number to the decimal representation using the +// supplied RoundingContext. +func (d *Decimal) Convert(r RoundingContext, number interface{}) { + switch f := number.(type) { + case Converter: + d.clear() + f.Convert(d, r) + case float32: + d.ConvertFloat(r, float64(f), 32) + case float64: + d.ConvertFloat(r, f, 64) + case int: + d.ConvertInt(r, signed, uint64(f)) + case int8: + d.ConvertInt(r, signed, uint64(f)) + case int16: + d.ConvertInt(r, signed, uint64(f)) + case int32: + d.ConvertInt(r, signed, uint64(f)) + case int64: + d.ConvertInt(r, signed, uint64(f)) + case uint: + d.ConvertInt(r, unsigned, uint64(f)) + case uint8: + d.ConvertInt(r, unsigned, uint64(f)) + case uint16: + d.ConvertInt(r, unsigned, uint64(f)) + case uint32: + d.ConvertInt(r, unsigned, uint64(f)) + case uint64: + d.ConvertInt(r, unsigned, f) + + default: + d.NaN = true + // TODO: + // case string: if produced by strconv, allows for easy arbitrary pos. + // case reflect.Value: + // case big.Float + // case big.Int + // case big.Rat? + // catch underlyings using reflect or will this already be done by the + // message package? 
+ } +} + +// ConvertInt converts an integer to decimals. +func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) { + if r.Increment > 0 { + // TODO: if uint64 is too large, fall back to float64 + if signed { + d.ConvertFloat(r, float64(int64(x)), 64) + } else { + d.ConvertFloat(r, float64(x), 64) + } + return + } + d.clear() + if signed && int64(x) < 0 { + x = uint64(-int64(x)) + d.Neg = true + } + d.fillIntDigits(x) + d.Exp = int32(len(d.Digits)) +} + +// ConvertFloat converts a floating point number to decimals. +func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) { + d.clear() + if math.IsNaN(x) { + d.NaN = true + return + } + // Simple case: decimal notation + if r.Increment > 0 { + scale := int(r.IncrementScale) + mult := 1.0 + if scale >= len(scales) { + mult = math.Pow(10, float64(scale)) + } else { + mult = scales[scale] + } + // We multiply x instead of dividing inc as it gives less rounding + // issues. + x *= mult + x /= float64(r.Increment) + x = r.Mode.roundFloat(x) + x *= float64(r.Increment) + x /= mult + } + + abs := x + if x < 0 { + d.Neg = true + abs = -x + } + if math.IsInf(abs, 1) { + d.Inf = true + return + } + + // By default we get the exact decimal representation. + verb := byte('g') + prec := -1 + // As the strconv API does not return the rounding accuracy, we can only + // round using ToNearestEven. + if r.Mode == ToNearestEven { + if n := r.RoundSignificantDigits(); n >= 0 { + prec = n + } else if n = r.RoundFractionDigits(); n >= 0 { + prec = n + verb = 'f' + } + } else { + // TODO: At this point strconv's rounding is imprecise to the point that + // it is not usable for this purpose. + // See https://github.com/golang/go/issues/21714 + // If rounding is requested, we ask for a large number of digits and + // round from there to simulate rounding only once. + // Ideally we would have strconv export an AppendDigits that would take + // a rounding mode and/or return an accuracy. Something like this would + // work: + // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int) + hasPrec := r.RoundSignificantDigits() >= 0 + hasScale := r.RoundFractionDigits() >= 0 + if hasPrec || hasScale { + // prec is the number of mantissa bits plus some extra for safety. + // We need at least the number of mantissa bits as decimals to + // accurately represent the floating point without rounding, as each + // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ... + prec = 60 + } + } + + b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size) + i := 0 + k := 0 + beforeDot := 1 + for i < len(b) { + if c := b[i]; '0' <= c && c <= '9' { + b[k] = c - '0' + k++ + d.Exp += int32(beforeDot) + } else if c == '.' 
{ + beforeDot = 0 + d.Exp = int32(k) + } else { + break + } + i++ + } + d.Digits = b[:k] + if i != len(b) { + i += len("e") + pSign := i + exp := 0 + for i++; i < len(b); i++ { + exp *= 10 + exp += int(b[i] - '0') + } + if b[pSign] == '-' { + exp = -exp + } + d.Exp = int32(exp) + 1 + } +} + +func (d *Decimal) fillIntDigits(x uint64) { + if cap(d.Digits) < maxIntDigits { + d.Digits = d.buf[:] + } else { + d.Digits = d.buf[:maxIntDigits] + } + i := 0 + for ; x > 0; x /= 10 { + d.Digits[i] = byte(x % 10) + i++ + } + d.Digits = d.Digits[:i] + for p := 0; p < i; p++ { + i-- + d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p] + } +} + +var scales [70]float64 + +func init() { + x := 1.0 + for i := range scales { + scales[i] = x + x *= 10 + } +} diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go new file mode 100644 index 000000000..1aadcf407 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/format.go @@ -0,0 +1,533 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package number + +import ( + "strconv" + "unicode/utf8" + + "golang.org/x/text/language" +) + +// TODO: +// - grouping of fractions +// - allow user-defined superscript notation (such as 4) +// - same for non-breaking spaces, like   + +// A VisibleDigits computes digits, comma placement and trailing zeros as they +// will be shown to the user. +type VisibleDigits interface { + Digits(buf []byte, t language.Tag, scale int) Digits + // TODO: Do we also need to add the verb or pass a format.State? +} + +// Formatting proceeds along the following lines: +// 0) Compose rounding information from format and context. +// 1) Convert a number into a Decimal. +// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and +// (non-increment) rounding. The Decimal that results from this is suitable +// for determining the plural form. +// 3) Render the Decimal in the localized form. + +// Formatter contains all the information needed to render a number. +type Formatter struct { + Pattern + Info +} + +func (f *Formatter) init(t language.Tag, index []uint8) { + f.Info = InfoFromTag(t) + f.Pattern = formats[index[tagToID(t)]] +} + +// InitPattern initializes a Formatter for the given Pattern. +func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) { + f.Info = InfoFromTag(t) + f.Pattern = *pat +} + +// InitDecimal initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitDecimal(t language.Tag) { + f.init(t, tagToDecimal) +} + +// InitScientific initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitScientific(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 +} + +// InitEngineering initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitEngineering(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 + f.Pattern.MaxIntegerDigits = 3 + f.Pattern.MinIntegerDigits = 1 +} + +// InitPercent initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitPercent(t language.Tag) { + f.init(t, tagToPercent) +} + +// InitPerMille initializes a Formatter using the default Pattern for the given +// language. 
+func (f *Formatter) InitPerMille(t language.Tag) { + f.init(t, tagToPercent) + f.Pattern.DigitShift = 3 +} + +func (f *Formatter) Append(dst []byte, x interface{}) []byte { + var d Decimal + r := f.RoundingContext + d.Convert(r, x) + return f.Render(dst, FormatDigits(&d, r)) +} + +func FormatDigits(d *Decimal, r RoundingContext) Digits { + if r.isScientific() { + return scientificVisibleDigits(r, d) + } + return decimalVisibleDigits(r, d) +} + +func (f *Formatter) Format(dst []byte, d *Decimal) []byte { + return f.Render(dst, FormatDigits(d, f.RoundingContext)) +} + +func (f *Formatter) Render(dst []byte, d Digits) []byte { + var result []byte + var postPrefix, preSuffix int + if d.IsScientific { + result, postPrefix, preSuffix = appendScientific(dst, f, &d) + } else { + result, postPrefix, preSuffix = appendDecimal(dst, f, &d) + } + if f.PadRune == 0 { + return result + } + width := int(f.FormatWidth) + if count := utf8.RuneCount(result); count < width { + insertPos := 0 + switch f.Flags & PadMask { + case PadAfterPrefix: + insertPos = postPrefix + case PadBeforeSuffix: + insertPos = preSuffix + case PadAfterSuffix: + insertPos = len(result) + } + num := width - count + pad := [utf8.UTFMax]byte{' '} + sz := 1 + if r := f.PadRune; r != 0 { + sz = utf8.EncodeRune(pad[:], r) + } + extra := sz * num + if n := len(result) + extra; n < cap(result) { + result = result[:n] + copy(result[insertPos+extra:], result[insertPos:]) + } else { + buf := make([]byte, n) + copy(buf, result[:insertPos]) + copy(buf[insertPos+extra:], result[insertPos:]) + result = buf + } + for ; num > 0; num-- { + insertPos += copy(result[insertPos:], pad[:sz]) + } + } + return result +} + +// decimalVisibleDigits converts d according to the RoundingContext. Note that +// the exponent may change as a result of this operation. +func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits} + + exp := n.Exp + exp += int32(r.DigitShift) + + // Cap integer digits. Remove *most-significant* digits. + if r.MaxIntegerDigits > 0 { + if p := int(exp) - int(r.MaxIntegerDigits); p > 0 { + if p > len(n.Digits) { + p = len(n.Digits) + } + if n.Digits = n.Digits[p:]; len(n.Digits) == 0 { + exp = 0 + } else { + exp -= int32(p) + } + // Strip leading zeros. + for len(n.Digits) > 0 && n.Digits[0] == 0 { + n.Digits = n.Digits[1:] + exp-- + } + } + } + + // Rounding if not already done by Convert. + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 { + if cap := int(exp) + maxFrac; cap < p { + p = int(exp) + maxFrac + } + if p < 0 { + p = 0 + } + } + n.round(r.Mode, p) + + // set End (trailing zeros) + n.End = int32(len(n.Digits)) + if n.End == 0 { + exp = 0 + if r.MinFractionDigits > 0 { + n.End = int32(r.MinFractionDigits) + } + if p := int32(r.MinSignificantDigits) - 1; p > n.End { + n.End = p + } + } else { + if end := exp + int32(r.MinFractionDigits); end > n.End { + n.End = end + } + if n.End < int32(r.MinSignificantDigits) { + n.End = int32(r.MinSignificantDigits) + } + } + n.Exp = exp + return n +} + +// appendDecimal appends a formatted number to dst. It returns two possible +// insertion points for padding. 
+func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, len(dst) + } + digits := n.Digits + exp := n.Exp + + // Split in integer and fraction part. + var intDigits, fracDigits []byte + numInt := 0 + numFrac := int(n.End - n.Exp) + if exp > 0 { + numInt = int(exp) + if int(exp) >= len(digits) { // ddddd | ddddd00 + intDigits = digits + } else { // ddd.dd + intDigits = digits[:exp] + fracDigits = digits[exp:] + } + } else { + fracDigits = digits + } + + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + minInt := int(f.MinIntegerDigits) + if minInt == 0 && f.MinSignificantDigits > 0 { + minInt = 1 + } + // add leading zeros + for i := minInt; i > numInt; i-- { + dst = f.AppendDigit(dst, 0) + if f.needsSep(i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + // Add trailing zeros + i = 0 + for n := -int(n.Exp); i < n; i++ { + dst = f.AppendDigit(dst, 0) + } + for _, d := range fracDigits { + i++ + dst = f.AppendDigit(dst, d) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits, IsScientific: true} + + // Normalize to have at least one digit. This simplifies engineering + // notation. + if len(n.Digits) == 0 { + n.Digits = append(n.Digits, 0) + n.Exp = 1 + } + + // Significant digits are transformed by the parser for scientific notation + // and do not need to be handled here. + maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits) + if numInt == 0 { + numInt = 1 + } + + // If a maximum number of integers is specified, the minimum must be 1 + // and the exponent is grouped by this number (e.g. for engineering) + if maxInt > numInt { + // Correct the exponent to reflect a single integer digit. + numInt = 1 + // engineering + // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3 + // 12345 ([12345]e+5) -> 1.2345e4 12.345e3 + d := int(n.Exp-1) % maxInt + if d < 0 { + d += maxInt + } + numInt += d + } + + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p { + p = numInt + maxFrac + } + n.round(r.Mode, p) + + n.Comma = uint8(numInt) + n.End = int32(len(n.Digits)) + if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig { + n.End = minSig + } + return n +} + +// appendScientific appends a formatted number to dst. It returns two possible +// insertion points for padding. 
+func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, 0 + } + digits := n.Digits + numInt := int(n.Comma) + numFrac := int(n.End) - int(n.Comma) + + var intDigits, fracDigits []byte + if numInt <= len(digits) { + intDigits = digits[:numInt] + fracDigits = digits[numInt:] + } else { + intDigits = digits + } + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + i = 0 + for ; i < len(fracDigits); i++ { + dst = f.AppendDigit(dst, fracDigits[i]) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + + // exp + buf := [12]byte{} + // TODO: use exponential if superscripting is not available (no Latin + // numbers or no tags) and use exponential in all other cases. + exp := n.Exp - int32(n.Comma) + exponential := f.Symbol(SymExponential) + if exponential == "E" { + dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) + dst = f.AppendDigit(dst, 1) + dst = f.AppendDigit(dst, 0) + switch { + case exp < 0: + dst = append(dst, superMinus...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, superPlus...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = append(dst, superDigits[0]...) + } + for _, c := range b { + dst = append(dst, superDigits[c-'0']...) + } + } else { + dst = append(dst, exponential...) + switch { + case exp < 0: + dst = append(dst, f.Symbol(SymMinusSign)...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, f.Symbol(SymPlusSign)...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = f.AppendDigit(dst, 0) + } + for _, c := range b { + dst = f.AppendDigit(dst, c-'0') + } + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +const ( + superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS + superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN +) + +var ( + // Note: the digits are not sequential!!! + superDigits = []string{ + "\u2070", // SUPERSCRIPT DIGIT ZERO + "\u00B9", // SUPERSCRIPT DIGIT ONE + "\u00B2", // SUPERSCRIPT DIGIT TWO + "\u00B3", // SUPERSCRIPT DIGIT THREE + "\u2074", // SUPERSCRIPT DIGIT FOUR + "\u2075", // SUPERSCRIPT DIGIT FIVE + "\u2076", // SUPERSCRIPT DIGIT SIX + "\u2077", // SUPERSCRIPT DIGIT SEVEN + "\u2078", // SUPERSCRIPT DIGIT EIGHT + "\u2079", // SUPERSCRIPT DIGIT NINE + } +) + +func (f *Formatter) getAffixes(neg bool) (affix, suffix string) { + str := f.Affix + if str != "" { + if f.NegOffset > 0 { + if neg { + str = str[f.NegOffset:] + } else { + str = str[:f.NegOffset] + } + } + sufStart := 1 + str[0] + affix = str[1:sufStart] + suffix = str[sufStart+1:] + } + // TODO: introduce a NeedNeg sign to indicate if the left pattern already + // has a sign marked? 
+ if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) { + affix = "-" + affix + } + return affix, suffix +} + +func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) { + if d.NaN { + return fmtNaN(dst, f), true + } + if d.Inf { + return fmtInfinite(dst, f, d), true + } + return dst, false +} + +func fmtNaN(dst []byte, f *Formatter) []byte { + return append(dst, f.Symbol(SymNan)...) +} + +func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte { + affix, suffix := f.getAffixes(d.Neg) + dst = appendAffix(dst, f, affix, d.Neg) + dst = append(dst, f.Symbol(SymInfinity)...) + dst = appendAffix(dst, f, suffix, d.Neg) + return dst +} + +func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte { + quoting := false + escaping := false + for _, r := range affix { + switch { + case escaping: + // escaping occurs both inside and outside of quotes + dst = append(dst, string(r)...) + escaping = false + case r == '\\': + escaping = true + case r == '\'': + quoting = !quoting + case quoting: + dst = append(dst, string(r)...) + case r == '%': + if f.DigitShift == 3 { + dst = append(dst, f.Symbol(SymPerMille)...) + } else { + dst = append(dst, f.Symbol(SymPercentSign)...) + } + case r == '-' || r == '+': + if neg { + dst = append(dst, f.Symbol(SymMinusSign)...) + } else if f.Flags&ElideSign == 0 { + dst = append(dst, f.Symbol(SymPlusSign)...) + } else { + dst = append(dst, ' ') + } + default: + dst = append(dst, string(r)...) + } + } + return dst +} diff --git a/vendor/golang.org/x/text/internal/number/number.go b/vendor/golang.org/x/text/internal/number/number.go new file mode 100644 index 000000000..e1d933c3f --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/number.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package number contains tools and data for formatting numbers. +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/language" +) + +// Info holds number formatting configuration data. +type Info struct { + system systemData // numbering system information + symIndex symOffset // index to symbols +} + +// InfoFromLangID returns a Info for the given compact language identifier and +// numbering system identifier. If system is the empty string, the default +// numbering system will be taken for that language. +func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info { + p := langToDefaults[compactIndex] + // Lookup the entry for the language. + pSymIndex := symOffset(0) // Default: Latin, default symbols + system, ok := systemMap[numberSystem] + if !ok { + // Take the value for the default numbering system. This is by far the + // most common case as an alternative numbering system is hardly used. + if p&hasNonLatnMask == 0 { // Latn digits. + pSymIndex = p + } else { // Non-Latn or multiple numbering systems. + // Take the first entry from the alternatives list. + data := langToAlt[p&^hasNonLatnMask] + pSymIndex = data.symIndex + system = data.system + } + } else { + langIndex := compactIndex + ns := system + outerLoop: + for ; ; p = langToDefaults[langIndex] { + if p&hasNonLatnMask == 0 { + if ns == 0 { + // The index directly points to the symbol data. + pSymIndex = p + break + } + // Move to the parent and retry. 
+ langIndex = langIndex.Parent() + } else { + // The index points to a list of symbol data indexes. + for _, e := range langToAlt[p&^hasNonLatnMask:] { + if e.compactTag != langIndex { + if langIndex == 0 { + // The CLDR root defines full symbol information for + // all numbering systems (even though mostly by + // means of aliases). Fall back to the default entry + // for Latn if there is no data for the numbering + // system of this language. + if ns == 0 { + break + } + // Fall back to Latin and start from the original + // language. See + // https://unicode.org/reports/tr35/#Locale_Inheritance. + ns = numLatn + langIndex = compactIndex + continue outerLoop + } + // Fall back to parent. + langIndex = langIndex.Parent() + } else if e.system == ns { + pSymIndex = e.symIndex + break outerLoop + } + } + } + } + } + if int(system) >= len(numSysData) { // algorithmic + // Will generate ASCII digits in case the user inadvertently calls + // WriteDigit or Digit on it. + d := numSysData[0] + d.id = system + return Info{ + system: d, + symIndex: pSymIndex, + } + } + return Info{ + system: numSysData[system], + symIndex: pSymIndex, + } +} + +// InfoFromTag returns a Info for the given language tag. +func InfoFromTag(t language.Tag) Info { + return InfoFromLangID(tagToID(t), t.TypeForKey("nu")) +} + +// IsDecimal reports if the numbering system can convert decimal to native +// symbols one-to-one. +func (n Info) IsDecimal() bool { + return int(n.system.id) < len(numSysData) +} + +// WriteDigit writes the UTF-8 sequence for n corresponding to the given ASCII +// digit to dst and reports the number of bytes written. dst must be large +// enough to hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) WriteDigit(dst []byte, asciiDigit rune) int { + copy(dst, n.system.zero[:n.system.digitSize]) + dst[n.system.digitSize-1] += byte(asciiDigit - '0') + return int(n.system.digitSize) +} + +// AppendDigit appends the UTF-8 sequence for n corresponding to the given digit +// to dst and reports the number of bytes written. dst must be large enough to +// hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) AppendDigit(dst []byte, digit byte) []byte { + dst = append(dst, n.system.zero[:n.system.digitSize]...) + dst[len(dst)-1] += digit + return dst +} + +// Digit returns the digit for the numbering system for the corresponding ASCII +// value. For example, ni.Digit('3') could return '三'. Note that the argument +// is the rune constant '3', which equals 51, not the integer constant 3. +func (n Info) Digit(asciiDigit rune) rune { + var x [utf8.UTFMax]byte + n.WriteDigit(x[:], asciiDigit) + r, _ := utf8.DecodeRune(x[:]) + return r +} + +// Symbol returns the string for the given symbol type. +func (n Info) Symbol(t SymbolType) string { + return symData.Elem(int(symIndex[n.symIndex][t])) +} + +func formatForLang(t language.Tag, index []byte) *Pattern { + return &formats[index[tagToID(t)]] +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/internal/number/pattern.go b/vendor/golang.org/x/text/internal/number/pattern.go new file mode 100644 index 000000000..06e59559a --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/pattern.go @@ -0,0 +1,485 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package number + +import ( + "errors" + "unicode/utf8" +) + +// This file contains a parser for the CLDR number patterns as described in +// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +// +// The following BNF is derived from this standard. +// +// pattern := subpattern (';' subpattern)? +// subpattern := affix? number exponent? affix? +// number := decimal | sigDigits +// decimal := '#'* '0'* ('.' fraction)? | '#' | '0' +// fraction := '0'* '#'* +// sigDigits := '#'* '@' '@'* '#'* +// exponent := 'E' '+'? '0'* '0' +// padSpec := '*' \L +// +// Notes: +// - An affix pattern may contain any runes, but runes with special meaning +// should be escaped. +// - Sequences of digits, '#', and '@' in decimal and sigDigits may have +// interstitial commas. + +// TODO: replace special characters in affixes (-, +, ¤) with control codes. + +// Pattern holds information for formatting numbers. It is designed to hold +// information from CLDR number patterns. +// +// This pattern is precompiled for all patterns for all languages. Even though +// the number of patterns is not very large, we want to keep this small. +// +// This type is only intended for internal use. +type Pattern struct { + RoundingContext + + Affix string // includes prefix and suffix. First byte is prefix length. + Offset uint16 // Offset into Affix for prefix and suffix + NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0. + PadRune rune + FormatWidth uint16 + + GroupingSize [2]uint8 + Flags PatternFlag +} + +// A RoundingContext indicates how a number should be converted to digits. +// It contains all information needed to determine the "visible digits" as +// required by the pluralization rules. +type RoundingContext struct { + // TODO: unify these two fields so that there is a more unambiguous meaning + // of how precision is handled. + MaxSignificantDigits int16 // -1 is unlimited + MaxFractionDigits int16 // -1 is unlimited + + Increment uint32 + IncrementScale uint8 // May differ from printed scale. + + Mode RoundingMode + + DigitShift uint8 // Number of decimals to shift. Used for % and ‰. + + // Number of digits. + MinIntegerDigits uint8 + + MaxIntegerDigits uint8 + MinFractionDigits uint8 + MinSignificantDigits uint8 + + MinExponentDigits uint8 +} + +// RoundSignificantDigits returns the number of significant digits an +// implementation of Convert may round to or n < 0 if there is no maximum or +// a maximum is not recommended. +func (r *RoundingContext) RoundSignificantDigits() (n int) { + if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 { + return int(r.MaxSignificantDigits) + } else if r.isScientific() && r.MaxIntegerDigits == 1 { + if r.MaxSignificantDigits == 0 || + int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) { + // Note: don't add DigitShift: it is only used for decimals. + return int(r.MaxFractionDigits) + 1 + } + } + return -1 +} + +// RoundFractionDigits returns the number of fraction digits an implementation +// of Convert may round to or n < 0 if there is no maximum or a maximum is not +// recommended. +func (r *RoundingContext) RoundFractionDigits() (n int) { + if r.MinExponentDigits == 0 && + r.MaxSignificantDigits == 0 && + r.MaxFractionDigits >= 0 { + return int(r.MaxFractionDigits) + int(r.DigitShift) + } + return -1 +} + +// SetScale fixes the RoundingContext to a fixed number of fraction digits. 
+func (r *RoundingContext) SetScale(scale int) { + r.MinFractionDigits = uint8(scale) + r.MaxFractionDigits = int16(scale) +} + +func (r *RoundingContext) SetPrecision(prec int) { + r.MaxSignificantDigits = int16(prec) +} + +func (r *RoundingContext) isScientific() bool { + return r.MinExponentDigits > 0 +} + +func (f *Pattern) needsSep(pos int) bool { + p := pos - 1 + size := int(f.GroupingSize[0]) + if size == 0 || p == 0 { + return false + } + if p == size { + return true + } + if p -= size; p < 0 { + return false + } + // TODO: make second groupingsize the same as first if 0 so that we can + // avoid this check. + if x := int(f.GroupingSize[1]); x != 0 { + size = x + } + return p%size == 0 +} + +// A PatternFlag is a bit mask for the flag field of a Pattern. +type PatternFlag uint8 + +const ( + AlwaysSign PatternFlag = 1 << iota + ElideSign // Use space instead of plus sign. AlwaysSign must be true. + AlwaysExpSign + AlwaysDecimalSeparator + ParenthesisForNegative // Common pattern. Saves space. + + PadAfterNumber + PadAfterAffix + + PadBeforePrefix = 0 // Default + PadAfterPrefix = PadAfterAffix + PadBeforeSuffix = PadAfterNumber + PadAfterSuffix = PadAfterNumber | PadAfterAffix + PadMask = PadAfterNumber | PadAfterAffix +) + +type parser struct { + *Pattern + + leadingSharps int + + pos int + err error + doNotTerminate bool + groupingCount uint + hasGroup bool + buf []byte +} + +func (p *parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *parser) updateGrouping() { + if p.hasGroup && + 0 < p.groupingCount && p.groupingCount < 255 { + p.GroupingSize[1] = p.GroupingSize[0] + p.GroupingSize[0] = uint8(p.groupingCount) + } + p.groupingCount = 0 + p.hasGroup = true +} + +var ( + // TODO: more sensible and localizeable error messages. + errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers") + errInvalidPadSpecifier = errors.New("format: invalid pad specifier") + errInvalidQuote = errors.New("format: invalid quote") + errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes") + errDuplicatePercentSign = errors.New("format: duplicate percent sign") + errDuplicatePermilleSign = errors.New("format: duplicate permille sign") + errUnexpectedEnd = errors.New("format: unexpected end of pattern") +) + +// ParsePattern extracts formatting information from a CLDR number pattern. +// +// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +func ParsePattern(s string) (f *Pattern, err error) { + p := parser{Pattern: &Pattern{}} + + s = p.parseSubPattern(s) + + if s != "" { + // Parse negative sub pattern. + if s[0] != ';' { + p.setError(errors.New("format: error parsing first sub pattern")) + return nil, p.err + } + neg := parser{Pattern: &Pattern{}} // just for extracting the affixes. + s = neg.parseSubPattern(s[len(";"):]) + p.NegOffset = uint16(len(p.buf)) + p.buf = append(p.buf, neg.buf...) + } + if s != "" { + p.setError(errors.New("format: spurious characters at end of pattern")) + } + if p.err != nil { + return nil, p.err + } + if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" { + // No prefix or suffixes. 
+ p.NegOffset = 0 + } else { + p.Affix = affix + } + if p.Increment == 0 { + p.IncrementScale = 0 + } + return p.Pattern, nil +} + +func (p *parser) parseSubPattern(s string) string { + s = p.parsePad(s, PadBeforePrefix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterPrefix) + + s = p.parse(p.number, s) + p.updateGrouping() + + s = p.parsePad(s, PadBeforeSuffix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterSuffix) + return s +} + +func (p *parser) parsePad(s string, f PatternFlag) (tail string) { + if len(s) >= 2 && s[0] == '*' { + r, sz := utf8.DecodeRuneInString(s[1:]) + if p.PadRune != 0 { + p.err = errMultiplePadSpecifiers + } else { + p.Flags |= f + p.PadRune = r + } + return s[1+sz:] + } + return s +} + +func (p *parser) parseAffix(s string) string { + x := len(p.buf) + p.buf = append(p.buf, 0) // placeholder for affix length + + s = p.parse(p.affix, s) + + n := len(p.buf) - x - 1 + if n > 0xFF { + p.setError(errAffixTooLarge) + } + p.buf[x] = uint8(n) + return s +} + +// state implements a state transition. It returns the new state. A state +// function may set an error on the parser or may simply return on an incorrect +// token and let the next phase fail. +type state func(r rune) state + +// parse repeatedly applies a state function on the given string until a +// termination condition is reached. +func (p *parser) parse(fn state, s string) (tail string) { + for i, r := range s { + p.doNotTerminate = false + if fn = fn(r); fn == nil || p.err != nil { + return s[i:] + } + p.FormatWidth++ + } + if p.doNotTerminate { + p.setError(errUnexpectedEnd) + } + return "" +} + +func (p *parser) affix(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '#', '@', '.', '*', ',', ';': + return nil + case '\'': + p.FormatWidth-- + return p.escapeFirst + case '%': + if p.DigitShift != 0 { + p.setError(errDuplicatePercentSign) + } + p.DigitShift = 2 + case '\u2030': // ‰ Per mille + if p.DigitShift != 0 { + p.setError(errDuplicatePermilleSign) + } + p.DigitShift = 3 + // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤ + } + p.buf = append(p.buf, string(r)...) + return p.affix +} + +func (p *parser) escapeFirst(r rune) state { + switch r { + case '\'': + p.buf = append(p.buf, "\\'"...) + return p.affix + default: + p.buf = append(p.buf, '\'') + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +func (p *parser) escape(r rune) state { + switch r { + case '\'': + p.FormatWidth-- + p.buf = append(p.buf, '\'') + return p.affix + default: + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +// number parses a number. The BNF says the integer part should always have +// a '0', but that does not appear to be the case according to the rest of the +// documentation. We will allow having only '#' numbers. +func (p *parser) number(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.leadingSharps++ + case '@': + p.groupingCount++ + p.leadingSharps = 0 + p.MaxFractionDigits = -1 + return p.sigDigits(r) + case ',': + if p.leadingSharps == 0 { // no leading commas + return nil + } + p.updateGrouping() + case 'E': + p.MaxIntegerDigits = uint8(p.leadingSharps) + return p.exponent + case '.': // allow ".##" etc. 
+ p.updateGrouping() + return p.fraction + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return p.integer(r) + default: + return nil + } + return p.number +} + +func (p *parser) integer(r rune) state { + if !('0' <= r && r <= '9') { + var next state + switch r { + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + next = p.exponent + case '.': + next = p.fraction + case ',': + next = p.integer + } + p.updateGrouping() + return next + } + p.Increment = p.Increment*10 + uint32(r-'0') + p.groupingCount++ + p.MinIntegerDigits++ + return p.integer +} + +func (p *parser) sigDigits(r rune) state { + switch r { + case '@': + p.groupingCount++ + p.MaxSignificantDigits++ + p.MinSignificantDigits++ + case '#': + return p.sigDigitsFinal(r) + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigits +} + +func (p *parser) sigDigitsFinal(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.MaxSignificantDigits++ + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigitsFinal +} + +func (p *parser) normalizeSigDigitsWithExponent() state { + p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1 + p.MinFractionDigits = p.MinSignificantDigits - 1 + p.MaxFractionDigits = p.MaxSignificantDigits - 1 + p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0 + return p.exponent +} + +func (p *parser) fraction(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Increment = p.Increment*10 + uint32(r-'0') + p.IncrementScale++ + p.MinFractionDigits++ + p.MaxFractionDigits++ + case '#': + p.MaxFractionDigits++ + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + return p.exponent + default: + return nil + } + return p.fraction +} + +func (p *parser) exponent(r rune) state { + switch r { + case '+': + // Set mode and check it wasn't already set. + if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 { + break + } + p.Flags |= AlwaysExpSign + p.doNotTerminate = true + return p.exponent + case '0': + p.MinExponentDigits++ + return p.exponent + } + // termination condition + if p.MinExponentDigits == 0 { + p.setError(errors.New("format: need at least one digit")) + } + return nil +} diff --git a/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/vendor/golang.org/x/text/internal/number/roundingmode_string.go new file mode 100644 index 000000000..bcc22471d --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/roundingmode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type RoundingMode"; DO NOT EDIT. + +package number + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ToNearestEven-0] + _ = x[ToNearestZero-1] + _ = x[ToNearestAway-2] + _ = x[ToPositiveInf-3] + _ = x[ToNegativeInf-4] + _ = x[ToZero-5] + _ = x[AwayFromZero-6] + _ = x[numModes-7] +} + +const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes" + +var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91} + +func (i RoundingMode) String() string { + if i >= RoundingMode(len(_RoundingMode_index)-1) { + return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]] +} diff --git a/vendor/golang.org/x/text/internal/number/tables.go b/vendor/golang.org/x/text/internal/number/tables.go new file mode 100644 index 000000000..8efce81b5 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/tables.go @@ -0,0 +1,1219 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import "golang.org/x/text/internal/stringset" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var numSysData = []systemData{ // 59 elements + 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}}, + 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}}, + 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}}, + 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}}, + 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}}, + 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}}, + 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}}, + 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}}, + 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}}, + 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}}, + 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}}, + 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}}, + 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}}, + 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}}, + 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}}, + 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}}, + 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}}, + 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}}, + 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}}, + 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}}, + 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}}, + 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}}, + 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}}, + 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}}, + 24: {id: 0x18, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x80, 0x0}}, + 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}}, + 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}}, + 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}}, + 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}}, + 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}}, + 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}}, + 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb5, 0xa6, 0x0}}, + 32: {id: 0x20, 
digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}}, + 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}}, + 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}}, + 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}}, + 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}}, + 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}}, + 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}}, + 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}}, + 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}}, + 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}}, + 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}}, + 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}}, + 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}}, + 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}}, + 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}}, + 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}}, + 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}}, + 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}}, + 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}}, + 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa7, 0x90, 0x0}}, + 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}}, + 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}}, + 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}}, + 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}}, + 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}}, + 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}}, + 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}}, +} // Size: 378 bytes + +const ( + numAdlm = 0x1 + numAhom = 0x2 + numArab = 0x3 + numArabext = 0x4 + numArmn = 0x3b + numArmnlow = 0x3c + numBali = 0x5 + numBeng = 0x6 + numBhks = 0x7 + numBrah = 0x8 + numCakm = 0x9 + numCham = 0xa + numCyrl = 0x3d + numDeva = 0xb + numEthi = 0x3e + numFullwide = 0xc + numGeor = 0x3f + numGonm = 0xd + numGrek = 0x40 + numGreklow = 0x41 + numGujr = 0xe + numGuru = 0xf + numHanidays = 0x42 + numHanidec = 0x43 + numHans = 0x44 + numHansfin = 0x45 + numHant = 0x46 + numHantfin = 0x47 + numHebr = 0x48 + numHmng = 0x10 + numJava = 0x11 + numJpan = 0x49 + numJpanfin = 0x4a + numKali = 0x12 + numKhmr = 0x13 + numKnda = 0x14 + numLana = 0x15 + numLanatham = 0x16 + numLaoo = 0x17 + numLatn = 0x0 + numLepc = 0x18 + numLimb = 0x19 + numMathbold = 0x1a + numMathdbl = 0x1b + numMathmono = 0x1c + numMathsanb = 0x1d + numMathsans = 0x1e + numMlym = 0x1f + numModi = 0x20 + numMong = 0x21 + numMroo = 0x22 + numMtei = 0x23 + numMymr = 0x24 + numMymrshan = 0x25 + numMymrtlng = 0x26 + numNewa = 0x27 + numNkoo = 0x28 + numOlck = 0x29 + numOrya = 0x2a + numOsma = 0x2b + numRoman = 0x4b + numRomanlow = 0x4c + numSaur = 0x2c + numShrd = 0x2d + numSind = 0x2e + numSinh = 0x2f + numSora = 0x30 + numSund = 0x31 + numTakr = 0x32 + numTalu = 0x33 + numTaml = 0x4d + numTamldec = 0x34 + numTelu = 0x35 + numThai = 0x36 + numTibt = 0x37 + numTirh = 0x38 + numVaii = 0x39 + numWara = 0x3a + numNumberSystems +) + +var systemMap = map[string]system{ + "adlm": numAdlm, + "ahom": numAhom, + "arab": numArab, + "arabext": numArabext, + "armn": numArmn, + "armnlow": 
numArmnlow, + "bali": numBali, + "beng": numBeng, + "bhks": numBhks, + "brah": numBrah, + "cakm": numCakm, + "cham": numCham, + "cyrl": numCyrl, + "deva": numDeva, + "ethi": numEthi, + "fullwide": numFullwide, + "geor": numGeor, + "gonm": numGonm, + "grek": numGrek, + "greklow": numGreklow, + "gujr": numGujr, + "guru": numGuru, + "hanidays": numHanidays, + "hanidec": numHanidec, + "hans": numHans, + "hansfin": numHansfin, + "hant": numHant, + "hantfin": numHantfin, + "hebr": numHebr, + "hmng": numHmng, + "java": numJava, + "jpan": numJpan, + "jpanfin": numJpanfin, + "kali": numKali, + "khmr": numKhmr, + "knda": numKnda, + "lana": numLana, + "lanatham": numLanatham, + "laoo": numLaoo, + "latn": numLatn, + "lepc": numLepc, + "limb": numLimb, + "mathbold": numMathbold, + "mathdbl": numMathdbl, + "mathmono": numMathmono, + "mathsanb": numMathsanb, + "mathsans": numMathsans, + "mlym": numMlym, + "modi": numModi, + "mong": numMong, + "mroo": numMroo, + "mtei": numMtei, + "mymr": numMymr, + "mymrshan": numMymrshan, + "mymrtlng": numMymrtlng, + "newa": numNewa, + "nkoo": numNkoo, + "olck": numOlck, + "orya": numOrya, + "osma": numOsma, + "roman": numRoman, + "romanlow": numRomanlow, + "saur": numSaur, + "shrd": numShrd, + "sind": numSind, + "sinh": numSinh, + "sora": numSora, + "sund": numSund, + "takr": numTakr, + "talu": numTalu, + "taml": numTaml, + "tamldec": numTamldec, + "telu": numTelu, + "thai": numThai, + "tibt": numTibt, + "tirh": numTirh, + "vaii": numVaii, + "wara": numWara, +} + +var symIndex = [][12]uint8{ // 81 elements + 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 4: [12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb}, + 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb}, + 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb}, + 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0}, + 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb}, + 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb}, + 25: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 
0x9, 0x1e, 0x0}, + 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb}, + 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb}, + 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb}, + 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb}, + 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb}, + 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb}, + 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb}, + 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb}, + 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb}, + 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb}, + 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb}, + 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb}, + 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb}, + 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0}, + 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb}, + 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb}, + 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb}, + 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb}, + 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb}, + 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb}, + 55: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb}, + 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb}, + 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb}, + 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb}, + 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb}, + 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 
0x35}, + 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb}, + 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35}, + 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35}, + 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35}, + 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35}, + 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49}, + 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb}, + 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb}, +} // Size: 996 bytes + +var symData = stringset.Set{ + Data: "" + // Size: 599 bytes + ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" + + "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" + + "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" + + "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" + + "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" + + "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" + + "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད", + Index: []uint16{ // 79 elements + // Entry 0 - 3F + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, + 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020, + 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058, + 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080, + 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105, + 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173, + 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6, + 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da, + // Entry 40 - 7F + 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208, + 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257, + }, +} // Size: 797 bytes + +// langToDefaults maps a compact language index to the default numbering system +// and default symbol set +var langToDefaults = [775]symOffset{ + // Entry 0 - 3F + 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000, + 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003, + 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, + 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004, + 0x0002, 0x0004, 0x0002, 0x0002, 0x0002, 0x0003, 0x0002, 0x0000, + 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + // Entry 40 - 7F + 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001, + 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007, + 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008, + 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009, + 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a, + 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001, + 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000, + // Entry 80 - BF + 0x000e, 0x000e, 
0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000, + 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + // Entry C0 - FF + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000, + 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015, + 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, + 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, + // Entry 100 - 13F + 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, + 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0016, 0x0016, + 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000, + 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, + // Entry 140 - 17F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000, + 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a, + 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001, + 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f, + 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, + 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006, + 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b, + 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d, + // Entry 200 - 23F + 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001, + 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, + 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067, + 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, + 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001, + // Entry 240 - 27F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029, + 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a, + 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000, + 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d, + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022, + 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006, + // Entry 2C0 - 2FF + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001, + 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b, + 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001, + 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080, + // Entry 300 - 33F + 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014, +} // Size: 1550 bytes + +// langToAlt is a list of numbering system and symbol set pairs, sorted and +// marked by compact language index. 
+var langToAlt = []altSymData{ // 131 elements + 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3}, + 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4}, + 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3}, + 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0}, + 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6}, + 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0}, + 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3}, + 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4}, + 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6}, + 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0}, + 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37}, + 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0}, + 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3}, + 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9}, + 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3}, + 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0}, + 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0}, + 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3}, + 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4}, + 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5}, + 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6}, + 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8}, + 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9}, + 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa}, + 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb}, + 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc}, + 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd}, + 28: {compactTag: 0x60, symIndex: 0x0, system: 0xe}, + 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf}, + 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11}, + 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12}, + 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13}, + 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14}, + 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15}, + 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16}, + 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17}, + 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18}, + 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19}, + 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f}, + 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21}, + 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23}, + 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24}, + 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25}, + 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28}, + 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29}, + 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a}, + 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b}, + 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c}, + 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d}, + 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30}, + 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31}, + 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32}, + 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33}, + 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34}, + 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35}, + 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36}, + 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37}, + 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39}, + 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43}, + 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0}, + 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3}, + 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4}, + 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37}, + 64: {compactTag: 0x7c, symIndex: 0x0, system: 0x0}, + 65: {compactTag: 
0x114, symIndex: 0x43, system: 0x4}, + 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0}, + 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3}, + 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0}, + 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3}, + 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4}, + 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0}, + 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3}, + 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4}, + 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0}, + 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3}, + 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0}, + 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9}, + 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa}, + 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0}, + 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3}, + 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4}, + 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0}, + 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3}, + 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0}, + 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3}, + 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4}, + 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0}, + 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4}, + 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0}, + 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb}, + 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24}, + 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0}, + 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4}, + 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0}, + 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3}, + 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4}, + 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb}, + 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0}, + 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3}, + 100: {compactTag: 0x20f, symIndex: 0x42, system: 0x4}, + 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0}, + 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4}, + 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4}, + 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0}, + 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4}, + 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0}, + 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3}, + 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0}, + 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0}, + 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3}, + 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4}, + 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc}, + 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0}, + 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3}, + 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4}, + 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0}, + 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4}, + 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4}, + 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0}, + 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4}, + 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4}, + 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0}, + 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0}, + 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4}, + 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0}, + 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3}, + 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4}, + 128: {compactTag: 0x2ff, 
symIndex: 0x36, system: 0x0}, + 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3}, + 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4}, +} // Size: 810 bytes + +var tagToDecimal = []uint8{ // 775 elements + // Entry 0 - 3F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 40 - 7F + 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, + // Entry 80 - BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry C0 - FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 100 - 13F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 140 - 17F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 180 - 1BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 1C0 - 1FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, + // Entry 200 - 23F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 240 - 27F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 280 - 2BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 2C0 - 2FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 300 - 33F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, +} // Size: 799 bytes + +var tagToScientific = []uint8{ // 775 elements + // Entry 0 - 3F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 40 - 7F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 80 - BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry C0 - FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 100 - 13F + 0x02, 0x02, 0x02, 
0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, + 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 180 - 1BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 1C0 - 1FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 200 - 23F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02, + 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 240 - 27F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 280 - 2BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 2C0 - 2FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 300 - 33F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09, +} // Size: 799 bytes + +var tagToPercent = []uint8{ // 775 elements + // Entry 0 - 3F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03, + 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04, + // Entry 80 - BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04, + 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 100 - 13F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + 0x0b, 0x0b, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 140 - 17F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 180 - 1BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + // Entry 1C0 - 1FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 200 - 23F + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 
0x06, 0x04, 0x04, 0x04, 0x06, 0x04, + 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + // Entry 280 - 2BF + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e, + // Entry 2C0 - 2FF + 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 300 - 33F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a, +} // Size: 799 bytes + +var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x0, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x3, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: 
"\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xc, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xa, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x8, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x6, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x3}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xd, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x4}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x2, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x03%\u00a0\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "\x01[\x01]", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 
0, + FormatWidth: 0x5, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x1, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x01%\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}} + +// Total table size 8634 bytes (8KiB); checksum: 8F23386D diff --git a/vendor/golang.org/x/text/internal/stringset/set.go b/vendor/golang.org/x/text/internal/stringset/set.go new file mode 100644 index 000000000..bb2fffbc7 --- /dev/null +++ b/vendor/golang.org/x/text/internal/stringset/set.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringset provides a way to represent a collection of strings +// compactly. +package stringset + +import "sort" + +// A Set holds a collection of strings that can be looked up by an index number. +type Set struct { + // These fields are exported to allow for code generation. + + Data string + Index []uint16 +} + +// Elem returns the string with index i. It panics if i is out of range. +func (s *Set) Elem(i int) string { + return s.Data[s.Index[i]:s.Index[i+1]] +} + +// Len returns the number of strings in the set. +func (s *Set) Len() int { + return len(s.Index) - 1 +} + +// Search returns the index of the given string or -1 if it is not in the set. +// The Set must have been created with strings in sorted order. +func Search(s *Set, str string) int { + // TODO: optimize this if it gets used a lot. + n := len(s.Index) - 1 + p := sort.Search(n, func(i int) bool { + return s.Elem(i) >= str + }) + if p == n || str != s.Elem(p) { + return -1 + } + return p +} + +// A Builder constructs Sets. +type Builder struct { + set Set + index map[string]int +} + +// NewBuilder returns a new and initialized Builder. +func NewBuilder() *Builder { + return &Builder{ + set: Set{ + Index: []uint16{0}, + }, + index: map[string]int{}, + } +} + +// Set creates the set created so far. +func (b *Builder) Set() Set { + return b.set +} + +// Index returns the index for the given string, which must have been added +// before. +func (b *Builder) Index(s string) int { + return b.index[s] +} + +// Add adds a string to the index. Strings that are added by a single Add will +// be stored together, unless they match an existing string. +func (b *Builder) Add(ss ...string) { + // First check if the string already exists. 
+ for _, s := range ss { + if _, ok := b.index[s]; ok { + continue + } + b.index[s] = len(b.set.Index) - 1 + b.set.Data += s + x := len(b.set.Data) + if x > 0xFFFF { + panic("Index too > 0xFFFF") + } + b.set.Index = append(b.set.Index, uint16(x)) + } +} diff --git a/vendor/golang.org/x/text/message/catalog.go b/vendor/golang.org/x/text/message/catalog.go new file mode 100644 index 000000000..068271def --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +// TODO: some types in this file will need to be made public at some time. +// Documentation and method names will reflect this by using the exported name. + +import ( + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// MatchLanguage reports the matched tag obtained from language.MatchStrings for +// the Matcher of the DefaultCatalog. +func MatchLanguage(preferred ...string) language.Tag { + c := DefaultCatalog + tag, _ := language.MatchStrings(c.Matcher(), preferred...) + return tag +} + +// DefaultCatalog is used by SetString. +var DefaultCatalog catalog.Catalog = defaultCatalog + +var defaultCatalog = catalog.NewBuilder() + +// SetString calls SetString on the initial default Catalog. +func SetString(tag language.Tag, key string, msg string) error { + return defaultCatalog.SetString(tag, key, msg) +} + +// Set calls Set on the initial default Catalog. +func Set(tag language.Tag, key string, msg ...catalog.Message) error { + return defaultCatalog.Set(tag, key, msg...) +} diff --git a/vendor/golang.org/x/text/message/catalog/catalog.go b/vendor/golang.org/x/text/message/catalog/catalog.go new file mode 100644 index 000000000..96955d075 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/catalog.go @@ -0,0 +1,365 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catalog defines collections of translated format strings. +// +// This package mostly defines types for populating catalogs with messages. The +// catmsg package contains further definitions for creating custom message and +// dictionary types as well as packages that use Catalogs. +// +// Package catalog defines various interfaces: Dictionary, Loader, and Message. +// A Dictionary maintains a set of translations of format strings for a single +// language. The Loader interface defines a source of dictionaries. A +// translation of a format string is represented by a Message. +// +// # Catalogs +// +// A Catalog defines a programmatic interface for setting message translations. +// It maintains a set of per-language dictionaries with translations for a set +// of keys. For message translation to function properly, a translation should +// be defined for each key for each supported language. A dictionary may be +// underspecified, though, if there is a parent language that already defines +// the key. For example, a Dictionary for "en-GB" could leave out entries that +// are identical to those in a dictionary for "en". +// +// # Messages +// +// A Message is a format string which varies on the value of substitution +// variables. For instance, to indicate the number of results one could want "no +// results" if there are none, "1 result" if there is 1, and "%d results" for +// any other number. 
Catalog is agnostic to the kind of format strings that are +// used: for instance, messages can follow either the printf-style substitution +// from package fmt or use templates. +// +// A Message does not substitute arguments in the format string. This job is +// reserved for packages that render strings, such as message, that use Catalogs +// to selected string. This separation of concerns allows Catalog to be used to +// store any kind of formatting strings. +// +// # Selecting messages based on linguistic features of substitution arguments +// +// Messages may vary based on any linguistic features of the argument values. +// The most common one is plural form, but others exist. +// +// Selection messages are provided in packages that provide support for a +// specific linguistic feature. The following snippet uses plural.Selectf: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// plural.Selectf(1, "", +// plural.One, "You are 1 minute late.", +// plural.Other, "You are %d minutes late.")) +// +// In this example, a message is stored in the Catalog where one of two messages +// is selected based on the first argument, a number. The first message is +// selected if the argument is singular (identified by the selector "one") and +// the second message is selected in all other cases. The selectors are defined +// by the plural rules defined in CLDR. The selector "other" is special and will +// always match. Each language always defines one of the linguistic categories +// to be "other." For English, singular is "one" and plural is "other". +// +// Selects can be nested. This allows selecting sentences based on features of +// multiple arguments or multiple linguistic properties of a single argument. +// +// # String interpolation +// +// There is often a lot of commonality between the possible variants of a +// message. For instance, in the example above the word "minute" varies based on +// the plural catogory of the argument, but the rest of the sentence is +// identical. Using interpolation the above message can be rewritten as: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", +// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// Var is defined to return the variable name if the message does not yield a +// match. This allows us to further simplify this snippet to +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")), +// catalog.String("You are %d ${minutes} late.")) +// +// Overall this is still only a minor improvement, but things can get a lot more +// unwieldy if more than one linguistic feature is used to determine a message +// variant. 
Consider the following example: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// catalog.Var("their", +// plural.Selectf(1, "" +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite")) +// catalog.String("%[1]v ${invites} %[2]v to ${their} party.")), +// +// Without variable substitution, this would have to be written as +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, +// "female", "%[1]v invites %[2]v to her party." +// "other", "%[1]v invites %[2]v to his party."), +// plural.Other, "%[1]v invites %[2]v to their party.")) +// +// Not necessarily shorter, but using variables there is less duplication and +// the messages are more maintenance friendly. Moreover, languages may have up +// to six plural forms. This makes the use of variables more welcome. +// +// Different messages using the same inflections can reuse variables by moving +// them to macros. Using macros we can rewrite the message as: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.", +// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.") +// +// Where the following macros were defined separately. +// +// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "", +// plural.One, "invite")) +// catalog.SetMacro(language.English, "their", plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// +// Placeholders use parentheses and the arguments to invoke a macro. +// +// # Looking up messages +// +// Message lookup using Catalogs is typically only done by specialized packages +// and is not something the user should be concerned with. For instance, to +// express the tardiness of a user using the related message we defined earlier, +// the user may use the package message like so: +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) +// +// Which would print: +// +// You are 5 minutes late. +// +// This package is UNDER CONSTRUCTION and its API may change. +package catalog // import "golang.org/x/text/message/catalog" + +// TODO: +// Some way to freeze a catalog. +// - Locking on each lockup turns out to be about 50% of the total running time +// for some of the benchmarks in the message package. +// Consider these: +// - Sequence type to support sequences in user-defined messages. +// - Garbage collection: Remove dictionaries that can no longer be reached +// as other dictionaries have been added that cover all possible keys. + +import ( + "errors" + "fmt" + + "golang.org/x/text/internal" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// A Catalog allows lookup of translated messages. +type Catalog interface { + // Languages returns all languages for which the Catalog contains variants. + Languages() []language.Tag + + // Matcher returns a Matcher for languages from this Catalog. + Matcher() language.Matcher + + // A Context is used for evaluating Messages. + Context(tag language.Tag, r catmsg.Renderer) *Context + + // This method also makes Catalog a private interface. + lookup(tag language.Tag, key string) (data string, ok bool) +} + +// NewFromMap creates a Catalog from the given map. 
If a Dictionary is +// underspecified the entry is retrieved from a parent language. +func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) { + options := options{} + for _, o := range opts { + o(&options) + } + c := &catalog{ + dicts: map[language.Tag]Dictionary{}, + } + _, hasFallback := dictionaries[options.fallback.String()] + if hasFallback { + // TODO: Should it be okay to not have a fallback language? + // Catalog generators could enforce there is always a fallback. + c.langs = append(c.langs, options.fallback) + } + for lang, dict := range dictionaries { + tag, err := language.Parse(lang) + if err != nil { + return nil, fmt.Errorf("catalog: invalid language tag %q", lang) + } + if _, ok := c.dicts[tag]; ok { + return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag) + } + c.dicts[tag] = dict + if !hasFallback || tag != options.fallback { + c.langs = append(c.langs, tag) + } + } + if hasFallback { + internal.SortTags(c.langs[1:]) + } else { + internal.SortTags(c.langs) + } + c.matcher = language.NewMatcher(c.langs) + return c, nil +} + +// A Dictionary is a source of translations for a single language. +type Dictionary interface { + // Lookup returns a message compiled with catmsg.Compile for the given key. + // It returns false for ok if such a message could not be found. + Lookup(key string) (data string, ok bool) +} + +type catalog struct { + langs []language.Tag + dicts map[language.Tag]Dictionary + macros store + matcher language.Matcher +} + +func (c *catalog) Languages() []language.Tag { return c.langs } +func (c *catalog) Matcher() language.Matcher { return c.matcher } + +func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) { + for ; ; tag = tag.Parent() { + if dict, ok := c.dicts[tag]; ok { + if data, ok := dict.Lookup(key); ok { + return data, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: c, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}), + } +} + +// A Builder allows building a Catalog programmatically. +type Builder struct { + options + matcher language.Matcher + + index store + macros store +} + +type options struct { + fallback language.Tag +} + +// An Option configures Catalog behavior. +type Option func(*options) + +// Fallback specifies the default fallback language. The default is Und. +func Fallback(tag language.Tag) Option { + return func(o *options) { o.fallback = tag } +} + +// TODO: +// // Catalogs specifies one or more sources for a Catalog. +// // Lookups are in order. +// // This can be changed inserting a Catalog used for setting, which implements +// // Loader, used for setting in the chain. +// func Catalogs(d ...Loader) Option { +// return nil +// } +// +// func Delims(start, end string) Option {} +// +// func Dict(tag language.Tag, d ...Dictionary) Option + +// NewBuilder returns an empty mutable Catalog. +func NewBuilder(opts ...Option) *Builder { + c := &Builder{} + for _, o := range opts { + o(&c.options) + } + return c +} + +// SetString is shorthand for Set(tag, key, String(msg)). +func (c *Builder) SetString(tag language.Tag, key string, msg string) error { + return c.set(tag, key, &c.index, String(msg)) +} + +// Set sets the translation for the given language and key. 
+// +// When evaluation this message, the first Message in the sequence to msgs to +// evaluate to a string will be the message returned. +func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error { + return c.set(tag, key, &c.index, msg...) +} + +// SetMacro defines a Message that may be substituted in another message. +// The arguments to a macro Message are passed as arguments in the +// placeholder the form "${foo(arg1, arg2)}". +func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error { + return c.set(tag, name, &c.macros, msg...) +} + +// ErrNotFound indicates there was no message for the given key. +var ErrNotFound = errors.New("catalog: message not found") + +// String specifies a plain message string. It can be used as fallback if no +// other strings match or as a simple standalone message. +// +// It is an error to pass more than one String in a message sequence. +func String(name string) Message { + return catmsg.String(name) +} + +// Var sets a variable that may be substituted in formatting patterns using +// named substitution of the form "${name}". The name argument is used as a +// fallback if the statements do not produce a match. The statement sequence may +// not contain any Var calls. +// +// The name passed to a Var must be unique within message sequence. +func Var(name string, msg ...Message) Message { + return &catmsg.Var{Name: name, Message: firstInSequence(msg)} +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: b, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}), + } +} + +// A Context is used for evaluating Messages. +// Only one Message may be formatted per context at any given time. +type Context struct { + cat Catalog + tag language.Tag // TODO: use compact index. + dec *catmsg.Decoder +} + +// Execute looks up and executes the message with the given key. +// It returns ErrNotFound if no message could be found in the index. +func (c *Context) Execute(key string) error { + data, ok := c.cat.lookup(c.tag, key) + if !ok { + return ErrNotFound + } + return c.dec.Execute(data) +} diff --git a/vendor/golang.org/x/text/message/catalog/dict.go b/vendor/golang.org/x/text/message/catalog/dict.go new file mode 100644 index 000000000..a0eb81810 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/dict.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catalog + +import ( + "sync" + + "golang.org/x/text/internal" + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// TODO: +// Dictionary returns a Dictionary that returns the first Message, using the +// given language tag, that matches: +// 1. the last one registered by one of the Set methods +// 2. returned by one of the Loaders +// 3. repeat from 1. using the parent language +// This approach allows messages to be underspecified. +// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) { +// // TODO: verify dictionary exists. +// return &dict{&c.index, tag}, nil +// } + +type dict struct { + s *store + tag language.Tag // TODO: make compact tag. 
+} + +func (d *dict) Lookup(key string) (data string, ok bool) { + return d.s.lookup(d.tag, key) +} + +func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) { + return b.index.lookup(tag, key) +} + +func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error { + data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, firstInSequence(msg)) + + s.mutex.Lock() + defer s.mutex.Unlock() + + m := s.index[tag] + if m == nil { + m = msgMap{} + if s.index == nil { + s.index = map[language.Tag]msgMap{} + } + c.matcher = nil + s.index[tag] = m + } + + m[key] = data + return err +} + +func (c *Builder) Matcher() language.Matcher { + c.index.mutex.RLock() + m := c.matcher + c.index.mutex.RUnlock() + if m != nil { + return m + } + + c.index.mutex.Lock() + if c.matcher == nil { + c.matcher = language.NewMatcher(c.unlockedLanguages()) + } + m = c.matcher + c.index.mutex.Unlock() + return m +} + +type store struct { + mutex sync.RWMutex + index map[language.Tag]msgMap +} + +type msgMap map[string]string + +func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for ; ; tag = tag.Parent() { + if msgs, ok := s.index[tag]; ok { + if msg, ok := msgs[key]; ok { + return msg, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Languages returns all languages for which the Catalog contains variants. +func (b *Builder) Languages() []language.Tag { + s := &b.index + s.mutex.RLock() + defer s.mutex.RUnlock() + + return b.unlockedLanguages() +} + +func (b *Builder) unlockedLanguages() []language.Tag { + s := &b.index + if len(s.index) == 0 { + return nil + } + tags := make([]language.Tag, 0, len(s.index)) + _, hasFallback := s.index[b.options.fallback] + offset := 0 + if hasFallback { + tags = append(tags, b.options.fallback) + offset = 1 + } + for t := range s.index { + if t != b.options.fallback { + tags = append(tags, t) + } + } + internal.SortTags(tags[offset:]) + return tags +} diff --git a/vendor/golang.org/x/text/message/catalog/go19.go b/vendor/golang.org/x/text/message/catalog/go19.go new file mode 100644 index 000000000..291a4df94 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/go19.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message = catmsg.Message + +type firstInSequence = catmsg.FirstOf diff --git a/vendor/golang.org/x/text/message/catalog/gopre19.go b/vendor/golang.org/x/text/message/catalog/gopre19.go new file mode 100644 index 000000000..da44ebb8b --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/gopre19.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. 
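+// For illustration only (not normative): values such as catalog.String("...")
+// or a selector built with plural.Selectf both satisfy Message and can be
+// passed to Builder.Set.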
+type Message interface { + catmsg.Message +} + +func firstInSequence(m []Message) catmsg.Message { + a := []catmsg.Message{} + for _, m := range m { + a = append(a, m) + } + return catmsg.FirstOf(a) +} diff --git a/vendor/golang.org/x/text/message/doc.go b/vendor/golang.org/x/text/message/doc.go new file mode 100644 index 000000000..4bf7bdcac --- /dev/null +++ b/vendor/golang.org/x/text/message/doc.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package message implements formatted I/O for localized strings with functions +// analogous to the fmt's print functions. It is a drop-in replacement for fmt. +// +// # Localized Formatting +// +// A format string can be localized by replacing any of the print functions of +// fmt with an equivalent call to a Printer. +// +// p := message.NewPrinter(message.MatchLanguage("en")) +// p.Println(123456.78) // Prints 123,456.78 +// +// p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row +// +// p := message.NewPrinter(message.MatchLanguage("nl")) +// p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1,244.9 meter +// +// p := message.NewPrinter(message.MatchLanguage("bn")) +// p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮ +// +// Printer currently supports numbers and specialized types for which packages +// exist in x/text. Other builtin types such as time.Time and slices are +// planned. +// +// Format strings largely have the same meaning as with fmt with the following +// notable exceptions: +// - flag # always resorts to fmt for printing +// - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is +// specified. +// - verb 'm' inserts a translation of a string argument. +// +// See package fmt for more options. +// +// # Translation +// +// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf +// are used as keys to look up translations for the specified languages. +// More on how these need to be specified below. +// +// One can use arbitrary keys to distinguish between otherwise ambiguous +// strings: +// +// p := message.NewPrinter(language.English) +// p.Printf("archive(noun)") // Prints "archive" +// p.Printf("archive(verb)") // Prints "archive" +// +// p := message.NewPrinter(language.German) +// p.Printf("archive(noun)") // Prints "Archiv" +// p.Printf("archive(verb)") // Prints "archivieren" +// +// To retain the fallback functionality, use Key: +// +// p.Printf(message.Key("archive(noun)", "archive")) +// p.Printf(message.Key("archive(verb)", "archive")) +// +// # Translation Pipeline +// +// Format strings that contain text need to be translated to support different +// locales. The first step is to extract strings that need to be translated. +// +// 1. Install gotext +// +// go get -u golang.org/x/text/cmd/gotext +// gotext -help +// +// 2. Mark strings in your source to be translated by using message.Printer, +// instead of the functions of the fmt package. +// +// 3. Extract the strings from your source +// +// gotext extract +// +// The output will be written to the textdata directory. +// +// 4. Send the files for translation +// +// It is planned to support multiple formats, but for now one will have to +// rewrite the JSON output to the desired format. +// +// 5. Inject translations into program +// +// 6. Repeat from 2 +// +// Right now this has to be done programmatically with calls to Set or +// SetString. 
These functions as well as the methods defined in +// see also package golang.org/x/text/message/catalog can be used to implement +// either dynamic or static loading of messages. +// +// # Plural and Gender Forms +// +// Translated messages can vary based on the plural and gender forms of +// substitution values. In general, it is up to the translators to provide +// alternative translations for such forms. See the packages in +// golang.org/x/text/feature and golang.org/x/text/message/catalog for more +// information. +package message diff --git a/vendor/golang.org/x/text/message/format.go b/vendor/golang.org/x/text/message/format.go new file mode 100644 index 000000000..a47d17dd4 --- /dev/null +++ b/vendor/golang.org/x/text/message/format.go @@ -0,0 +1,510 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +import ( + "bytes" + "strconv" + "unicode/utf8" + + "golang.org/x/text/internal/format" +) + +const ( + ldigits = "0123456789abcdefx" + udigits = "0123456789ABCDEFX" +) + +const ( + signed = true + unsigned = false +) + +// A formatInfo is the raw formatter used by Printf etc. +// It prints into a buffer that must be set up separately. +type formatInfo struct { + buf *bytes.Buffer + + format.Parser + + // intbuf is large enough to store %b of an int64 with a sign and + // avoids padding at the end of the struct on 32 bit architectures. + intbuf [68]byte +} + +func (f *formatInfo) init(buf *bytes.Buffer) { + f.ClearFlags() + f.buf = buf +} + +// writePadding generates n bytes of padding. +func (f *formatInfo) writePadding(n int) { + if n <= 0 { // No padding bytes needed. + return + } + f.buf.Grow(n) + // Decide which byte the padding should be filled with. + padByte := byte(' ') + if f.Zero { + padByte = byte('0') + } + // Fill padding with padByte. + for i := 0; i < n; i++ { + f.buf.WriteByte(padByte) // TODO: make more efficient. + } +} + +// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus). +func (f *formatInfo) pad(b []byte) { + if !f.WidthPresent || f.Width == 0 { + f.buf.Write(b) + return + } + width := f.Width - utf8.RuneCount(b) + if !f.Minus { + // left padding + f.writePadding(width) + f.buf.Write(b) + } else { + // right padding + f.buf.Write(b) + f.writePadding(width) + } +} + +// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus). +func (f *formatInfo) padString(s string) { + if !f.WidthPresent || f.Width == 0 { + f.buf.WriteString(s) + return + } + width := f.Width - utf8.RuneCountInString(s) + if !f.Minus { + // left padding + f.writePadding(width) + f.buf.WriteString(s) + } else { + // right padding + f.buf.WriteString(s) + f.writePadding(width) + } +} + +// fmt_boolean formats a boolean. +func (f *formatInfo) fmt_boolean(v bool) { + if v { + f.padString("true") + } else { + f.padString("false") + } +} + +// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'". +func (f *formatInfo) fmt_unicode(u uint64) { + buf := f.intbuf[0:] + + // With default precision set the maximum needed buf length is 18 + // for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits + // into the already allocated intbuf with a capacity of 68 bytes. + prec := 4 + if f.PrecPresent && f.Prec > 4 { + prec = f.Prec + // Compute space needed for "U+" , number, " '", character, "'". 
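+	// (That is: 2 bytes for "U+", prec hex digits, 2 bytes for " '",
+	// up to utf8.UTFMax bytes for the encoded rune, and 1 byte for the
+	// closing quote.)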
+ width := 2 + prec + 2 + utf8.UTFMax + 1 + if width > len(buf) { + buf = make([]byte, width) + } + } + + // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left. + i := len(buf) + + // For %#U we want to add a space and a quoted character at the end of the buffer. + if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) { + i-- + buf[i] = '\'' + i -= utf8.RuneLen(rune(u)) + utf8.EncodeRune(buf[i:], rune(u)) + i-- + buf[i] = '\'' + i-- + buf[i] = ' ' + } + // Format the Unicode code point u as a hexadecimal number. + for u >= 16 { + i-- + buf[i] = udigits[u&0xF] + prec-- + u >>= 4 + } + i-- + buf[i] = udigits[u] + prec-- + // Add zeros in front of the number until requested precision is reached. + for prec > 0 { + i-- + buf[i] = '0' + prec-- + } + // Add a leading "U+". + i-- + buf[i] = '+' + i-- + buf[i] = 'U' + + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// fmt_integer formats signed and unsigned integers. +func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) { + negative := isSigned && int64(u) < 0 + if negative { + u = -u + } + + buf := f.intbuf[0:] + // The already allocated f.intbuf with a capacity of 68 bytes + // is large enough for integer formatting when no precision or width is set. + if f.WidthPresent || f.PrecPresent { + // Account 3 extra bytes for possible addition of a sign and "0x". + width := 3 + f.Width + f.Prec // wid and prec are always positive. + if width > len(buf) { + // We're going to need a bigger boat. + buf = make([]byte, width) + } + } + + // Two ways to ask for extra leading zero digits: %.3d or %03d. + // If both are specified the f.zero flag is ignored and + // padding with spaces is used instead. + prec := 0 + if f.PrecPresent { + prec = f.Prec + // Precision of 0 and value of 0 means "print nothing" but padding. + if prec == 0 && u == 0 { + oldZero := f.Zero + f.Zero = false + f.writePadding(f.Width) + f.Zero = oldZero + return + } + } else if f.Zero && f.WidthPresent { + prec = f.Width + if negative || f.Plus || f.Space { + prec-- // leave room for sign + } + } + + // Because printing is easier right-to-left: format u into buf, ending at buf[i]. + // We could make things marginally faster by splitting the 32-bit case out + // into a separate block but it's not worth the duplication, so u has 64 bits. + i := len(buf) + // Use constants for the division and modulo for more efficient code. + // Switch cases ordered by popularity. + switch base { + case 10: + for u >= 10 { + i-- + next := u / 10 + buf[i] = byte('0' + u - next*10) + u = next + } + case 16: + for u >= 16 { + i-- + buf[i] = digits[u&0xF] + u >>= 4 + } + case 8: + for u >= 8 { + i-- + buf[i] = byte('0' + u&7) + u >>= 3 + } + case 2: + for u >= 2 { + i-- + buf[i] = byte('0' + u&1) + u >>= 1 + } + default: + panic("fmt: unknown base; can't happen") + } + i-- + buf[i] = digits[u] + for i > 0 && prec > len(buf)-i { + i-- + buf[i] = '0' + } + + // Various prefixes: 0x, -, etc. + if f.Sharp { + switch base { + case 8: + if buf[i] != '0' { + i-- + buf[i] = '0' + } + case 16: + // Add a leading 0x or 0X. + i-- + buf[i] = digits[16] + i-- + buf[i] = '0' + } + } + + if negative { + i-- + buf[i] = '-' + } else if f.Plus { + i-- + buf[i] = '+' + } else if f.Space { + i-- + buf[i] = ' ' + } + + // Left padding with zeros has already been handled like precision earlier + // or the f.zero flag is ignored due to an explicitly set precision. 
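+	// Clearing Zero for the final pad keeps any remaining width padding as
+	// spaces; zero padding at this point would be written before the sign.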
+ oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// truncate truncates the string to the specified precision, if present. +func (f *formatInfo) truncate(s string) string { + if f.PrecPresent { + n := f.Prec + for i := range s { + n-- + if n < 0 { + return s[:i] + } + } + } + return s +} + +// fmt_s formats a string. +func (f *formatInfo) fmt_s(s string) { + s = f.truncate(s) + f.padString(s) +} + +// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) { + length := len(b) + if b == nil { + // No byte slice present. Assume string s should be encoded. + length = len(s) + } + // Set length to not process more bytes than the precision demands. + if f.PrecPresent && f.Prec < length { + length = f.Prec + } + // Compute width of the encoding taking into account the f.sharp and f.space flag. + width := 2 * length + if width > 0 { + if f.Space { + // Each element encoded by two hexadecimals will get a leading 0x or 0X. + if f.Sharp { + width *= 2 + } + // Elements will be separated by a space. + width += length - 1 + } else if f.Sharp { + // Only a leading 0x or 0X will be added for the whole string. + width += 2 + } + } else { // The byte slice or string that should be encoded is empty. + if f.WidthPresent { + f.writePadding(f.Width) + } + return + } + // Handle padding to the left. + if f.WidthPresent && f.Width > width && !f.Minus { + f.writePadding(f.Width - width) + } + // Write the encoding directly into the output buffer. + buf := f.buf + if f.Sharp { + // Add leading 0x or 0X. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + var c byte + for i := 0; i < length; i++ { + if f.Space && i > 0 { + // Separate elements with a space. + buf.WriteByte(' ') + if f.Sharp { + // Add leading 0x or 0X for each element. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + } + if b != nil { + c = b[i] // Take a byte from the input byte slice. + } else { + c = s[i] // Take a byte from the input string. + } + // Encode each byte as two hexadecimal digits. + buf.WriteByte(digits[c>>4]) + buf.WriteByte(digits[c&0xF]) + } + // Handle padding to the right. + if f.WidthPresent && f.Width > width && f.Minus { + f.writePadding(f.Width - width) + } +} + +// fmt_sx formats a string as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sx(s, digits string) { + f.fmt_sbx(s, nil, digits) +} + +// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_bx(b []byte, digits string) { + f.fmt_sbx("", b, digits) +} + +// fmt_q formats a string as a double-quoted, escaped Go string constant. +// If f.sharp is set a raw (backquoted) string may be returned instead +// if the string does not contain any control characters other than tab. +func (f *formatInfo) fmt_q(s string) { + s = f.truncate(s) + if f.Sharp && strconv.CanBackquote(s) { + f.padString("`" + s + "`") + return + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteToASCII(buf, s)) + } else { + f.pad(strconv.AppendQuote(buf, s)) + } +} + +// fmt_c formats an integer as a Unicode character. +// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_c(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + w := utf8.EncodeRune(buf[:utf8.UTFMax], r) + f.pad(buf[:w]) +} + +// fmt_qc formats an integer as a single-quoted, escaped Go character constant. 
+// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_qc(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteRuneToASCII(buf, r)) + } else { + f.pad(strconv.AppendQuoteRune(buf, r)) + } +} + +// fmt_float formats a float64. It assumes that verb is a valid format specifier +// for strconv.AppendFloat and therefore fits into a byte. +func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) { + // Explicit precision in format specifier overrules default precision. + if f.PrecPresent { + prec = f.Prec + } + // Format number, reserving space for leading + sign if needed. + num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size) + if num[1] == '-' || num[1] == '+' { + num = num[1:] + } else { + num[0] = '+' + } + // f.space means to add a leading space instead of a "+" sign unless + // the sign is explicitly asked for by f.plus. + if f.Space && num[0] == '+' && !f.Plus { + num[0] = ' ' + } + // Special handling for infinities and NaN, + // which don't look like a number so shouldn't be padded with zeros. + if num[1] == 'I' || num[1] == 'N' { + oldZero := f.Zero + f.Zero = false + // Remove sign before NaN if not asked for. + if num[1] == 'N' && !f.Space && !f.Plus { + num = num[1:] + } + f.pad(num) + f.Zero = oldZero + return + } + // The sharp flag forces printing a decimal point for non-binary formats + // and retains trailing zeros, which we may need to restore. + if f.Sharp && verb != 'b' { + digits := 0 + switch verb { + case 'v', 'g', 'G': + digits = prec + // If no precision is set explicitly use a precision of 6. + if digits == -1 { + digits = 6 + } + } + + // Buffer pre-allocated with enough room for + // exponent notations of the form "e+123". + var tailBuf [5]byte + tail := tailBuf[:0] + + hasDecimalPoint := false + // Starting from i = 1 to skip sign at num[0]. + for i := 1; i < len(num); i++ { + switch num[i] { + case '.': + hasDecimalPoint = true + case 'e', 'E': + tail = append(tail, num[i:]...) + num = num[:i] + default: + digits-- + } + } + if !hasDecimalPoint { + num = append(num, '.') + } + for digits > 0 { + num = append(num, '0') + digits-- + } + num = append(num, tail...) + } + // We want a sign if asked for and if the sign is not positive. + if f.Plus || num[0] != '+' { + // If we're zero padding to the left we want the sign before the leading zeros. + // Achieve this by writing the sign out and then padding the unsigned number. + if f.Zero && f.WidthPresent && f.Width > len(num) { + f.buf.WriteByte(num[0]) + f.writePadding(f.Width - len(num)) + f.buf.Write(num[1:]) + return + } + f.pad(num) + return + } + // No sign to show and the number is positive; just print the unsigned number. + f.pad(num[1:]) +} diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go new file mode 100644 index 000000000..91a972642 --- /dev/null +++ b/vendor/golang.org/x/text/message/message.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message // import "golang.org/x/text/message" + +import ( + "io" + "os" + + // Include features to facilitate generated catalogs. 
+ _ "golang.org/x/text/feature/plural" + + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// A Printer implements language-specific formatted I/O analogous to the fmt +// package. +type Printer struct { + // the language + tag language.Tag + + toDecimal number.Formatter + toScientific number.Formatter + + cat catalog.Catalog +} + +type options struct { + cat catalog.Catalog + // TODO: + // - allow %s to print integers in written form (tables are likely too large + // to enable this by default). + // - list behavior + // +} + +// An Option defines an option of a Printer. +type Option func(o *options) + +// Catalog defines the catalog to be used. +func Catalog(c catalog.Catalog) Option { + return func(o *options) { o.cat = c } +} + +// NewPrinter returns a Printer that formats messages tailored to language t. +func NewPrinter(t language.Tag, opts ...Option) *Printer { + options := &options{ + cat: DefaultCatalog, + } + for _, o := range opts { + o(options) + } + p := &Printer{ + tag: t, + cat: options.cat, + } + p.toDecimal.InitDecimal(t) + p.toScientific.InitScientific(t) + return p +} + +// Sprint is like fmt.Sprint, but using language-specific formatting. +func (p *Printer) Sprint(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrint(a) + s := pp.String() + pp.free() + return s +} + +// Fprint is like fmt.Fprint, but using language-specific formatting. +func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrint(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Print is like fmt.Print, but using language-specific formatting. +func (p *Printer) Print(a ...interface{}) (n int, err error) { + return p.Fprint(os.Stdout, a...) +} + +// Sprintln is like fmt.Sprintln, but using language-specific formatting. +func (p *Printer) Sprintln(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrintln(a) + s := pp.String() + pp.free() + return s +} + +// Fprintln is like fmt.Fprintln, but using language-specific formatting. +func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrintln(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Println is like fmt.Println, but using language-specific formatting. +func (p *Printer) Println(a ...interface{}) (n int, err error) { + return p.Fprintln(os.Stdout, a...) +} + +// Sprintf is like fmt.Sprintf, but using language-specific formatting. +func (p *Printer) Sprintf(key Reference, a ...interface{}) string { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + s := pp.String() + pp.free() + return s +} + +// Fprintf is like fmt.Fprintf, but using language-specific formatting. +func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = w.Write(pp.Bytes()) + pp.free() + return n, err + +} + +// Printf is like fmt.Printf, but using language-specific formatting. 
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = os.Stdout.Write(pp.Bytes()) + pp.free() + return n, err +} + +func lookupAndFormat(p *printer, r Reference, a []interface{}) { + p.fmt.Reset(a) + switch v := r.(type) { + case string: + if p.catContext.Execute(v) == catalog.ErrNotFound { + p.Render(v) + return + } + case key: + if p.catContext.Execute(v.id) == catalog.ErrNotFound && + p.catContext.Execute(v.fallback) == catalog.ErrNotFound { + p.Render(v.fallback) + return + } + default: + panic("key argument is not a Reference") + } +} + +type rawPrinter struct { + p *printer +} + +func (p rawPrinter) Render(msg string) { p.p.WriteString(msg) } +func (p rawPrinter) Arg(i int) interface{} { return nil } + +// Arg implements catmsg.Renderer. +func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool + i-- + if uint(i) < uint(len(p.fmt.Args)) { + return p.fmt.Args[i] + } + return nil +} + +// Render implements catmsg.Renderer. +func (p *printer) Render(msg string) { + p.doPrintf(msg) +} + +// A Reference is a string or a message reference. +type Reference interface { + // TODO: also allow []string +} + +// Key creates a message Reference for a message where the given id is used for +// message lookup and the fallback is returned when no matches are found. +func Key(id string, fallback string) Reference { + return key{id, fallback} +} + +type key struct { + id, fallback string +} diff --git a/vendor/golang.org/x/text/message/print.go b/vendor/golang.org/x/text/message/print.go new file mode 100644 index 000000000..da304cc0e --- /dev/null +++ b/vendor/golang.org/x/text/message/print.go @@ -0,0 +1,984 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +import ( + "bytes" + "fmt" // TODO: consider copying interfaces from package fmt to avoid dependency. + "math" + "reflect" + "sync" + "unicode/utf8" + + "golang.org/x/text/internal/format" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// Strings for use with buffer.WriteString. +// This is less overhead than using buffer.Write with byte arrays. +const ( + commaSpaceString = ", " + nilAngleString = "" + nilParenString = "(nil)" + nilString = "nil" + mapString = "map[" + percentBangString = "%!" + missingString = "(MISSING)" + badIndexString = "(BADINDEX)" + panicString = "(PANIC=" + extraString = "%!(EXTRA " + badWidthString = "%!(BADWIDTH)" + badPrecString = "%!(BADPREC)" + noVerbString = "%!(NOVERB)" + + invReflectString = "" +) + +var printerPool = sync.Pool{ + New: func() interface{} { return new(printer) }, +} + +// newPrinter allocates a new printer struct or grabs a cached one. +func newPrinter(pp *Printer) *printer { + p := printerPool.Get().(*printer) + p.Printer = *pp + // TODO: cache most of the following call. + p.catContext = pp.cat.Context(pp.tag, p) + + p.panicking = false + p.erroring = false + p.fmt.init(&p.Buffer) + return p +} + +// free saves used printer structs in printerFree; avoids an allocation per invocation. +func (p *printer) free() { + p.Buffer.Reset() + p.arg = nil + p.value = reflect.Value{} + printerPool.Put(p) +} + +// printer is used to store a printer's state. +// It implements "golang.org/x/text/internal/format".State. 
+type printer struct { + Printer + + // the context for looking up message translations + catContext *catalog.Context + + // buffer for accumulating output. + bytes.Buffer + + // arg holds the current item, as an interface{}. + arg interface{} + // value is used instead of arg for reflect values. + value reflect.Value + + // fmt is used to format basic items such as integers or strings. + fmt formatInfo + + // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion. + panicking bool + // erroring is set when printing an error string to guard against calling handleMethods. + erroring bool +} + +// Language implements "golang.org/x/text/internal/format".State. +func (p *printer) Language() language.Tag { return p.tag } + +func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent } + +func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent } + +func (p *printer) Flag(b int) bool { + switch b { + case '-': + return p.fmt.Minus + case '+': + return p.fmt.Plus || p.fmt.PlusV + case '#': + return p.fmt.Sharp || p.fmt.SharpV + case ' ': + return p.fmt.Space + case '0': + return p.fmt.Zero + } + return false +} + +// getField gets the i'th field of the struct value. +// If the field is itself is an interface, return a value for +// the thing inside the interface, not the interface itself. +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} + +func (p *printer) unknownType(v reflect.Value) { + if !v.IsValid() { + p.WriteString(nilAngleString) + return + } + p.WriteByte('?') + p.WriteString(v.Type().String()) + p.WriteByte('?') +} + +func (p *printer) badVerb(verb rune) { + p.erroring = true + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteByte('(') + switch { + case p.arg != nil: + p.WriteString(reflect.TypeOf(p.arg).String()) + p.WriteByte('=') + p.printArg(p.arg, 'v') + case p.value.IsValid(): + p.WriteString(p.value.Type().String()) + p.WriteByte('=') + p.printValue(p.value, 'v', 0) + default: + p.WriteString(nilAngleString) + } + p.WriteByte(')') + p.erroring = false +} + +func (p *printer) fmtBool(v bool, verb rune) { + switch verb { + case 't', 'v': + p.fmt.fmt_boolean(v) + default: + p.badVerb(verb) + } +} + +// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or +// not, as requested, by temporarily setting the sharp flag. +func (p *printer) fmt0x64(v uint64, leading0x bool) { + sharp := p.fmt.Sharp + p.fmt.Sharp = leading0x + p.fmt.fmt_integer(v, 16, unsigned, ldigits) + p.fmt.Sharp = sharp +} + +// fmtInteger formats a signed or unsigned integer. +func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) { + switch verb { + case 'v': + if p.fmt.SharpV && !isSigned { + p.fmt0x64(v, true) + return + } + fallthrough + case 'd': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_integer(v, 10, isSigned, ldigits) + } else { + p.fmtDecimalInt(v, isSigned) + } + case 'b': + p.fmt.fmt_integer(v, 2, isSigned, ldigits) + case 'o': + p.fmt.fmt_integer(v, 8, isSigned, ldigits) + case 'x': + p.fmt.fmt_integer(v, 16, isSigned, ldigits) + case 'X': + p.fmt.fmt_integer(v, 16, isSigned, udigits) + case 'c': + p.fmt.fmt_c(v) + case 'q': + if v <= utf8.MaxRune { + p.fmt.fmt_qc(v) + } else { + p.badVerb(verb) + } + case 'U': + p.fmt.fmt_unicode(v) + default: + p.badVerb(verb) + } +} + +// fmtFloat formats a float. 
The default precision for each verb +// is specified as last argument in the call to fmt_float. +func (p *printer) fmtFloat(v float64, size int, verb rune) { + switch verb { + case 'b': + p.fmt.fmt_float(v, size, verb, -1) + case 'v': + verb = 'g' + fallthrough + case 'g', 'G': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, -1) + } else { + p.fmtVariableFloat(v, size) + } + case 'e', 'E': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtScientific(v, size, 6) + } + case 'f', 'F': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtDecimalFloat(v, size, 6) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) setFlags(f *number.Formatter) { + f.Flags &^= number.ElideSign + if p.fmt.Plus || p.fmt.Space { + f.Flags |= number.AlwaysSign + if !p.fmt.Plus { + f.Flags |= number.ElideSign + } + } else { + f.Flags &^= number.AlwaysSign + } +} + +func (p *printer) updatePadding(f *number.Formatter) { + f.Flags &^= number.PadMask + if p.fmt.Minus { + f.Flags |= number.PadAfterSuffix + } else { + f.Flags |= number.PadBeforePrefix + } + f.PadRune = ' ' + f.FormatWidth = uint16(p.fmt.Width) +} + +func (p *printer) initDecimal(minFrac, maxFrac int) { + f := &p.toDecimal + f.MinIntegerDigits = 1 + f.MaxIntegerDigits = 0 + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + if p.fmt.Zero { + wid := p.fmt.Width + // Use significant integers for this. + // TODO: this is not the same as width, but so be it. + if f.MinFractionDigits > 0 { + wid -= 1 + int(f.MinFractionDigits) + } + if p.fmt.Plus || p.fmt.Space { + wid-- + } + if wid > 0 && wid > int(f.MinIntegerDigits) { + f.MinIntegerDigits = uint8(wid) + } + } + p.updatePadding(f) + } +} + +func (p *printer) initScientific(minFrac, maxFrac int) { + f := &p.toScientific + if maxFrac < 0 { + f.SetPrecision(maxFrac) + } else { + f.SetPrecision(maxFrac + 1) + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + } + f.MinExponentDigits = 2 + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + f.Flags &^= number.PadMask + if p.fmt.Zero { + f.PadRune = f.Digit(0) + f.Flags |= number.PadAfterPrefix + } else { + f.PadRune = ' ' + f.Flags |= number.PadBeforePrefix + } + p.updatePadding(f) + } +} + +func (p *printer) fmtDecimalInt(v uint64, isSigned bool) { + var d number.Decimal + + f := &p.toDecimal + if p.fmt.PrecPresent { + p.setFlags(f) + f.MinIntegerDigits = uint8(p.fmt.Prec) + f.MaxIntegerDigits = 0 + f.MinFractionDigits = 0 + f.MaxFractionDigits = 0 + if p.fmt.WidthPresent { + p.updatePadding(f) + } + } else { + p.initDecimal(0, 0) + } + d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtDecimalFloat(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initDecimal(prec, prec) + d.ConvertFloat(p.toDecimal.RoundingContext, v, size) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtVariableFloat(v float64, size int) { + prec := -1 + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + var d number.Decimal + p.initScientific(0, prec) + d.ConvertFloat(p.toScientific.RoundingContext, v, size) + + // Copy logic of 'g' formatting from strconv. It is simplified a bit as + // we don't have to mind having prec > len(d.Digits). 
+ shortest := prec < 0 + ePrec := prec + if shortest { + prec = len(d.Digits) + ePrec = 6 + } else if prec == 0 { + prec = 1 + ePrec = 1 + } + exp := int(d.Exp) - 1 + if exp < -4 || exp >= ePrec { + p.initScientific(0, prec) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + } else { + if prec > int(d.Exp) { + prec = len(d.Digits) + } + if prec -= int(d.Exp); prec < 0 { + prec = 0 + } + p.initDecimal(0, prec) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) + } +} + +func (p *printer) fmtScientific(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initScientific(prec, prec) + rc := p.toScientific.RoundingContext + d.ConvertFloat(rc, v, size) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + +} + +// fmtComplex formats a complex number v with +// r = real(v) and j = imag(v) as (r+ji) using +// fmtFloat for r and j formatting. +func (p *printer) fmtComplex(v complex128, size int, verb rune) { + // Make sure any unsupported verbs are found before the + // calls to fmtFloat to not generate an incorrect error string. + switch verb { + case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E': + p.WriteByte('(') + p.fmtFloat(real(v), size/2, verb) + // Imaginary part always has a sign. + if math.IsNaN(imag(v)) { + // By CLDR's rules, NaNs do not use patterns or signs. As this code + // relies on AlwaysSign working for imaginary parts, we need to + // manually handle NaNs. + f := &p.toScientific + p.setFlags(f) + p.updatePadding(f) + p.setFlags(f) + nan := f.Symbol(number.SymNan) + extra := 0 + if w, ok := p.Width(); ok { + extra = w - utf8.RuneCountInString(nan) - 1 + } + if f.Flags&number.PadAfterNumber == 0 { + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + } + p.WriteString(f.Symbol(number.SymPlusSign)) + p.WriteString(nan) + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + p.WriteString("i)") + return + } + oldPlus := p.fmt.Plus + p.fmt.Plus = true + p.fmtFloat(imag(v), size/2, verb) + p.WriteString("i)") // TODO: use symbol? 
+ p.fmt.Plus = oldPlus + default: + p.badVerb(verb) + } +} + +func (p *printer) fmtString(v string, verb rune) { + switch verb { + case 'v': + if p.fmt.SharpV { + p.fmt.fmt_q(v) + } else { + p.fmt.fmt_s(v) + } + case 's': + p.fmt.fmt_s(v) + case 'x': + p.fmt.fmt_sx(v, ldigits) + case 'X': + p.fmt.fmt_sx(v, udigits) + case 'q': + p.fmt.fmt_q(v) + case 'm': + ctx := p.cat.Context(p.tag, rawPrinter{p}) + if ctx.Execute(v) == catalog.ErrNotFound { + p.WriteString(v) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) fmtBytes(v []byte, verb rune, typeString string) { + switch verb { + case 'v', 'd': + if p.fmt.SharpV { + p.WriteString(typeString) + if v == nil { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i, c := range v { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.fmt0x64(uint64(c), true) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i, c := range v { + if i > 0 { + p.WriteByte(' ') + } + p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits) + } + p.WriteByte(']') + } + case 's': + p.fmt.fmt_s(string(v)) + case 'x': + p.fmt.fmt_bx(v, ldigits) + case 'X': + p.fmt.fmt_bx(v, udigits) + case 'q': + p.fmt.fmt_q(string(v)) + default: + p.printValue(reflect.ValueOf(v), verb, 0) + } +} + +func (p *printer) fmtPointer(value reflect.Value, verb rune) { + var u uintptr + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + u = value.Pointer() + default: + p.badVerb(verb) + return + } + + switch verb { + case 'v': + if p.fmt.SharpV { + p.WriteByte('(') + p.WriteString(value.Type().String()) + p.WriteString(")(") + if u == 0 { + p.WriteString(nilString) + } else { + p.fmt0x64(uint64(u), true) + } + p.WriteByte(')') + } else { + if u == 0 { + p.fmt.padString(nilAngleString) + } else { + p.fmt0x64(uint64(u), !p.fmt.Sharp) + } + } + case 'p': + p.fmt0x64(uint64(u), !p.fmt.Sharp) + case 'b', 'o', 'd', 'x', 'X': + if verb == 'd' { + p.fmt.Sharp = true // Print as standard go. TODO: does this make sense? + } + p.fmtInteger(uint64(u), unsigned, verb) + default: + p.badVerb(verb) + } +} + +func (p *printer) catchPanic(arg interface{}, verb rune) { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() { + p.WriteString(nilAngleString) + return + } + // Otherwise print a concise panic message. Most of the time the panic + // value will print itself nicely. + if p.panicking { + // Nested panics; the recursion in printArg cannot succeed. + panic(err) + } + + oldFlags := p.fmt.Parser + // For this output we want default behavior. + p.fmt.ClearFlags() + + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(panicString) + p.panicking = true + p.printArg(err, 'v') + p.panicking = false + p.WriteByte(')') + + p.fmt.Parser = oldFlags + } +} + +func (p *printer) handleMethods(verb rune) (handled bool) { + if p.erroring { + return + } + // Is it a Formatter? + if formatter, ok := p.arg.(format.Formatter); ok { + handled = true + defer p.catchPanic(p.arg, verb) + formatter.Format(p, verb) + return + } + if formatter, ok := p.arg.(fmt.Formatter); ok { + handled = true + defer p.catchPanic(p.arg, verb) + formatter.Format(p, verb) + return + } + + // If we're doing Go syntax and the argument knows how to supply it, take care of it now. 
+ if p.fmt.SharpV { + if stringer, ok := p.arg.(fmt.GoStringer); ok { + handled = true + defer p.catchPanic(p.arg, verb) + // Print the result of GoString unadorned. + p.fmt.fmt_s(stringer.GoString()) + return + } + } else { + // If a string is acceptable according to the format, see if + // the value satisfies one of the string-valued interfaces. + // Println etc. set verb to %v, which is "stringable". + switch verb { + case 'v', 's', 'x', 'X', 'q': + // Is it an error or Stringer? + // The duplication in the bodies is necessary: + // setting handled and deferring catchPanic + // must happen before calling the method. + switch v := p.arg.(type) { + case error: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.Error(), verb) + return + + case fmt.Stringer: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.String(), verb) + return + } + } + } + return false +} + +func (p *printer) printArg(arg interface{}, verb rune) { + p.arg = arg + p.value = reflect.Value{} + + if arg == nil { + switch verb { + case 'T', 'v': + p.fmt.padString(nilAngleString) + default: + p.badVerb(verb) + } + return + } + + // Special processing considerations. + // %T (the value's type) and %p (its address) are special; we always do them first. + switch verb { + case 'T': + p.fmt.fmt_s(reflect.TypeOf(arg).String()) + return + case 'p': + p.fmtPointer(reflect.ValueOf(arg), 'p') + return + } + + // Some types can be done without reflection. + switch f := arg.(type) { + case bool: + p.fmtBool(f, verb) + case float32: + p.fmtFloat(float64(f), 32, verb) + case float64: + p.fmtFloat(f, 64, verb) + case complex64: + p.fmtComplex(complex128(f), 64, verb) + case complex128: + p.fmtComplex(f, 128, verb) + case int: + p.fmtInteger(uint64(f), signed, verb) + case int8: + p.fmtInteger(uint64(f), signed, verb) + case int16: + p.fmtInteger(uint64(f), signed, verb) + case int32: + p.fmtInteger(uint64(f), signed, verb) + case int64: + p.fmtInteger(uint64(f), signed, verb) + case uint: + p.fmtInteger(uint64(f), unsigned, verb) + case uint8: + p.fmtInteger(uint64(f), unsigned, verb) + case uint16: + p.fmtInteger(uint64(f), unsigned, verb) + case uint32: + p.fmtInteger(uint64(f), unsigned, verb) + case uint64: + p.fmtInteger(f, unsigned, verb) + case uintptr: + p.fmtInteger(uint64(f), unsigned, verb) + case string: + p.fmtString(f, verb) + case []byte: + p.fmtBytes(f, verb, "[]byte") + case reflect.Value: + // Handle extractable values with special methods + // since printValue does not handle them at depth 0. + if f.IsValid() && f.CanInterface() { + p.arg = f.Interface() + if p.handleMethods(verb) { + return + } + } + p.printValue(f, verb, 0) + default: + // If the type is not simple, it might have methods. + if !p.handleMethods(verb) { + // Need to use reflection, since the type had no + // interface methods that could be used for formatting. + p.printValue(reflect.ValueOf(f), verb, 0) + } + } +} + +// printValue is similar to printArg but starts with a reflect value, not an interface{} value. +// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg. +func (p *printer) printValue(value reflect.Value, verb rune, depth int) { + // Handle values with special methods if not already handled by printArg (depth == 0). 
+ if depth > 0 && value.IsValid() && value.CanInterface() { + p.arg = value.Interface() + if p.handleMethods(verb) { + return + } + } + p.arg = nil + p.value = value + + switch f := value; value.Kind() { + case reflect.Invalid: + if depth == 0 { + p.WriteString(invReflectString) + } else { + switch verb { + case 'v': + p.WriteString(nilAngleString) + default: + p.badVerb(verb) + } + } + case reflect.Bool: + p.fmtBool(f.Bool(), verb) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.fmtInteger(uint64(f.Int()), signed, verb) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.fmtInteger(f.Uint(), unsigned, verb) + case reflect.Float32: + p.fmtFloat(f.Float(), 32, verb) + case reflect.Float64: + p.fmtFloat(f.Float(), 64, verb) + case reflect.Complex64: + p.fmtComplex(f.Complex(), 64, verb) + case reflect.Complex128: + p.fmtComplex(f.Complex(), 128, verb) + case reflect.String: + p.fmtString(f.String(), verb) + case reflect.Map: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + } else { + p.WriteString(mapString) + } + keys := f.MapKeys() + for i, key := range keys { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + p.printValue(key, verb, depth+1) + p.WriteByte(':') + p.printValue(f.MapIndex(key), verb, depth+1) + } + if p.fmt.SharpV { + p.WriteByte('}') + } else { + p.WriteByte(']') + } + case reflect.Struct: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + } + p.WriteByte('{') + for i := 0; i < f.NumField(); i++ { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + if p.fmt.PlusV || p.fmt.SharpV { + if name := f.Type().Field(i).Name; name != "" { + p.WriteString(name) + p.WriteByte(':') + } + } + p.printValue(getField(f, i), verb, depth+1) + } + p.WriteByte('}') + case reflect.Interface: + value := f.Elem() + if !value.IsValid() { + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + p.WriteString(nilParenString) + } else { + p.WriteString(nilAngleString) + } + } else { + p.printValue(value, verb, depth+1) + } + case reflect.Array, reflect.Slice: + switch verb { + case 's', 'q', 'x', 'X': + // Handle byte and uint8 slices and arrays special for the above verbs. + t := f.Type() + if t.Elem().Kind() == reflect.Uint8 { + var bytes []byte + if f.Kind() == reflect.Slice { + bytes = f.Bytes() + } else if f.CanAddr() { + bytes = f.Slice(0, f.Len()).Bytes() + } else { + // We have an array, but we cannot Slice() a non-addressable array, + // so we build a slice by hand. This is a rare case but it would be nice + // if reflection could help a little more. + bytes = make([]byte, f.Len()) + for i := range bytes { + bytes[i] = byte(f.Index(i).Uint()) + } + } + p.fmtBytes(bytes, verb, t.String()) + return + } + } + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.Kind() == reflect.Slice && f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteByte(' ') + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte(']') + } + case reflect.Ptr: + // pointer to array or slice or struct? 
ok at top level + // but not embedded (avoid loops) + if depth == 0 && f.Pointer() != 0 { + switch a := f.Elem(); a.Kind() { + case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map: + p.WriteByte('&') + p.printValue(a, verb, depth+1) + return + } + } + fallthrough + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + p.fmtPointer(f, verb) + default: + p.unknownType(f) + } +} + +func (p *printer) badArgNum(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(badIndexString) +} + +func (p *printer) missingArg(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(missingString) +} + +func (p *printer) doPrintf(fmt string) { + for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); { + switch p.fmt.Status { + case format.StatusText: + p.WriteString(p.fmt.Text()) + case format.StatusSubstitution: + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadWidthSubstitution: + p.WriteString(badWidthString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadPrecSubstitution: + p.WriteString(badPrecString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusNoVerb: + p.WriteString(noVerbString) + case format.StatusBadArgNum: + p.badArgNum(p.fmt.Verb) + case format.StatusMissingArg: + p.missingArg(p.fmt.Verb) + default: + panic("unreachable") + } + } + + // Check for extra arguments, but only if there was at least one ordered + // argument. Note that this behavior is necessarily different from fmt: + // different variants of messages may opt to drop some or all of the + // arguments. + if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 { + p.fmt.ClearFlags() + p.WriteString(extraString) + for i, arg := range p.fmt.Args[p.fmt.ArgNum:] { + if i > 0 { + p.WriteString(commaSpaceString) + } + if arg == nil { + p.WriteString(nilAngleString) + } else { + p.WriteString(reflect.TypeOf(arg).String()) + p.WriteString("=") + p.printArg(arg, 'v') + } + } + p.WriteByte(')') + } +} + +func (p *printer) doPrint(a []interface{}) { + prevString := false + for argNum, arg := range a { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + // Add a space between two non-string arguments. + if argNum > 0 && !isString && !prevString { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + prevString = isString + } +} + +// doPrintln is like doPrint but always adds a space between arguments +// and a newline after the last argument. +func (p *printer) doPrintln(a []interface{}) { + for argNum, arg := range a { + if argNum > 0 { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + } + p.WriteByte('\n') +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 1413ee13d..1914bb476 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -9,11 +9,11 @@ package assign import ( _ "embed" - "fmt" "go/ast" "go/token" "go/types" "reflect" + "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" @@ -48,31 +48,84 @@ func run(pass *analysis.Pass) (any, error) { // If LHS and RHS have different cardinality, they can't be the same. return } + + // Delete redundant LHS, RHS pairs, taking care + // to include intervening commas. 
+ var ( + exprs []string // expressions appearing on both sides (x = x) + edits []analysis.TextEdit + runStartLHS, runStartRHS token.Pos // non-zero => within a run + ) for i, lhs := range stmt.Lhs { rhs := stmt.Rhs[i] - if analysisutil.HasSideEffects(pass.TypesInfo, lhs) || - analysisutil.HasSideEffects(pass.TypesInfo, rhs) || - isMapIndex(pass.TypesInfo, lhs) { - continue // expressions may not be equal - } - if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) { - continue // short-circuit the heavy-weight gofmt check + isSelfAssign := false + var le string + + if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) && + !analysisutil.HasSideEffects(pass.TypesInfo, rhs) && + !isMapIndex(pass.TypesInfo, lhs) && + reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check + + le = analysisinternal.Format(pass.Fset, lhs) + re := analysisinternal.Format(pass.Fset, rhs) + if le == re { + isSelfAssign = true + } } - le := analysisinternal.Format(pass.Fset, lhs) - re := analysisinternal.Format(pass.Fset, rhs) - if le == re { - pass.Report(analysis.Diagnostic{ - Pos: stmt.Pos(), Message: fmt.Sprintf("self-assignment of %s to %s", re, le), - SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove self-assignment", - TextEdits: []analysis.TextEdit{{ - Pos: stmt.Pos(), - End: stmt.End(), - }}}, - }, - }) + + if isSelfAssign { + exprs = append(exprs, le) + if !runStartLHS.IsValid() { + // Start of a new run of self-assignments. + if i > 0 { + runStartLHS = stmt.Lhs[i-1].End() + runStartRHS = stmt.Rhs[i-1].End() + } else { + runStartLHS = lhs.Pos() + runStartRHS = rhs.Pos() + } + } + } else if runStartLHS.IsValid() { + // End of a run of self-assignments. + endLHS, endRHS := stmt.Lhs[i-1].End(), stmt.Rhs[i-1].End() + if runStartLHS == stmt.Lhs[0].Pos() { + endLHS, endRHS = lhs.Pos(), rhs.Pos() + } + edits = append(edits, + analysis.TextEdit{Pos: runStartLHS, End: endLHS}, + analysis.TextEdit{Pos: runStartRHS, End: endRHS}, + ) + runStartLHS, runStartRHS = 0, 0 } } + + // If a run of self-assignments continues to the end of the statement, close it. + if runStartLHS.IsValid() { + last := len(stmt.Lhs) - 1 + edits = append(edits, + analysis.TextEdit{Pos: runStartLHS, End: stmt.Lhs[last].End()}, + analysis.TextEdit{Pos: runStartRHS, End: stmt.Rhs[last].End()}, + ) + } + + if len(exprs) == 0 { + return + } + + if len(exprs) == len(stmt.Lhs) { + // If every part of the statement is a self-assignment, + // remove the whole statement. + edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}} + } + + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + Message: "self-assignment of " + strings.Join(exprs, ", "), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove self-assignment", + TextEdits: edits, + }}, + }) }) return nil, nil diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go b/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go new file mode 100644 index 000000000..429125a8b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go @@ -0,0 +1,135 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stdversion reports uses of standard library symbols that are +// "too new" for the Go version in force in the referring file. 
+package stdversion + +import ( + "go/ast" + "go/build" + "go/types" + "regexp" + "slices" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" +) + +const Doc = `report uses of too-new standard library symbols + +The stdversion analyzer reports references to symbols in the standard +library that were introduced by a Go release higher than the one in +force in the referring file. (Recall that the file's Go version is +defined by the 'go' directive its module's go.mod file, or by a +"//go:build go1.X" build tag at the top of the file.) + +The analyzer does not report a diagnostic for a reference to a "too +new" field or method of a type that is itself "too new", as this may +have false positives, for example if fields or methods are accessed +through a type alias that is guarded by a Go version constraint. +` + +var Analyzer = &analysis.Analyzer{ + Name: "stdversion", + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdversion", + RunDespiteErrors: true, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + // Prior to go1.22, versions.FileVersion returns only the + // toolchain version, which is of no use to us, so + // disable this analyzer on earlier versions. + if !slices.Contains(build.Default.ReleaseTags, "go1.22") { + return nil, nil + } + + // Don't report diagnostics for modules marked before go1.21, + // since at that time the go directive wasn't clearly + // specified as a toolchain requirement. + pkgVersion := pass.Pkg.GoVersion() + if !versions.AtLeast(pkgVersion, "go1.21") { + return nil, nil + } + + // disallowedSymbols returns the set of standard library symbols + // in a given package that are disallowed at the specified Go version. + type key struct { + pkg *types.Package + version string + } + memo := make(map[key]map[types.Object]string) // records symbol's minimum Go version + disallowedSymbols := func(pkg *types.Package, version string) map[types.Object]string { + k := key{pkg, version} + disallowed, ok := memo[k] + if !ok { + disallowed = typesinternal.TooNewStdSymbols(pkg, version) + memo[k] = disallowed + } + return disallowed + } + + // Scan the syntax looking for references to symbols + // that are disallowed by the version of the file. + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.File)(nil), + (*ast.Ident)(nil), + } + var fileVersion string // "" => no check + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.File: + if ast.IsGenerated(n) { + // Suppress diagnostics in generated files (such as cgo). 
+ fileVersion = "" + } else { + fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n)) + // (may be "" if unknown) + } + + case *ast.Ident: + if fileVersion != "" { + if obj, ok := pass.TypesInfo.Uses[n]; ok && obj.Pkg() != nil { + disallowed := disallowedSymbols(obj.Pkg(), fileVersion) + if minVersion, ok := disallowed[origin(obj)]; ok { + noun := "module" + if fileVersion != pkgVersion { + noun = "file" + } + pass.ReportRangef(n, "%s.%s requires %v or later (%s is %s)", + obj.Pkg().Name(), obj.Name(), minVersion, noun, fileVersion) + } + } + } + } + }) + return nil, nil +} + +// Matches cgo generated comment as well as the proposed standard: +// +// https://golang.org/s/generatedcode +var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) + +// origin returns the original uninstantiated symbol for obj. +func origin(obj types.Object) types.Object { + switch obj := obj.(type) { + case *types.Var: + return obj.Origin() + case *types.Func: + return obj.Origin() + case *types.TypeName: + if named, ok := obj.Type().(*types.Named); ok { // (don't unalias) + return named.Origin().Obj() + } + } + return obj +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go new file mode 100644 index 000000000..207f74183 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package waitgroup defines an Analyzer that detects simple misuses +// of sync.WaitGroup. +// +// # Analyzer waitgroup +// +// waitgroup: check for misuses of sync.WaitGroup +// +// This analyzer detects mistaken calls to the (*sync.WaitGroup).Add +// method from inside a new goroutine, causing Add to race with Wait: +// +// // WRONG +// var wg sync.WaitGroup +// go func() { +// wg.Add(1) // "WaitGroup.Add called from inside new goroutine" +// defer wg.Done() +// ... +// }() +// wg.Wait() // (may return prematurely before new goroutine starts) +// +// The correct code calls Add before starting the goroutine: +// +// // RIGHT +// var wg sync.WaitGroup +// wg.Add(1) +// go func() { +// defer wg.Done() +// ... +// }() +// wg.Wait() +package waitgroup diff --git a/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go new file mode 100644 index 000000000..14c6986ea --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -0,0 +1,91 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package waitgroup defines an Analyzer that detects simple misuses +// of sync.WaitGroup. 
+package waitgroup + +import ( + _ "embed" + "go/ast" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "waitgroup", + Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + if !analysisinternal.Imports(pass.Pkg, "sync") { + return nil, nil // doesn't directly import sync + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) (proceed bool) { + if push { + call := n.(*ast.CallExpr) + obj := typeutil.Callee(pass.TypesInfo, call) + if analysisinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && + hasSuffix(stack, wantSuffix) && + backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt + + pass.Reportf(call.Lparen, "WaitGroup.Add called from inside new goroutine") + } + } + return true + }) + + return nil, nil +} + +// go func() { +// wg.Add(1) +// ... +// }() +var wantSuffix = []ast.Node{ + (*ast.GoStmt)(nil), + (*ast.CallExpr)(nil), + (*ast.FuncLit)(nil), + (*ast.BlockStmt)(nil), + (*ast.ExprStmt)(nil), + (*ast.CallExpr)(nil), +} + +// hasSuffix reports whether stack has the matching suffix, +// considering only node types. +func hasSuffix(stack, suffix []ast.Node) bool { + // TODO(adonovan): the inspector could implement this for us. + if len(stack) < len(suffix) { + return false + } + for i := range len(suffix) { + if reflect.TypeOf(backindex(stack, i)) != reflect.TypeOf(backindex(suffix, i)) { + return false + } + } + return true +} + +// backindex is like [slices.Index] but from the back of the slice. +func backindex[T any](slice []T, i int) T { + return slice[len(slice)-1-i] +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 89f5097be..0fb4e7eea 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // childrenOf elides the FuncType node beneath FuncDecl. // Add it back here for TypeParams, Params, Results, // all FieldLists). But we don't add it back for the "func" token - // even though it is is the tree at FuncDecl.Type.Func. + // even though it is the tree at FuncDecl.Type.Func. if decl, ok := node.(*ast.FuncDecl); ok { if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { path = append(path, decl.Type) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index bc44b2c8e..a703cdfcf 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -85,6 +85,7 @@ type event struct { // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). // Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. 
--adonovan] // Preorder visits all the nodes of the files supplied to New in // depth-first order. It calls f(n) for each node n before it visits diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index e936c67c9..9852331a3 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - _ "unsafe" ) const ( @@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 { return 0 } -//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf func maskOf(nodes []ast.Node) uint64 { if len(nodes) == 0 { return math.MaxUint64 // match all node types diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index f1931d10e..366aab6b2 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. +See also [golang.org/x/tools/go/packages/internal/linecount] +for an example application. # The driver protocol diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 96e43cd80..89f89dd2d 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -224,13 +224,22 @@ extractQueries: return response.dr, nil } +// abs returns an absolute representation of path, based on cfg.Dir. +func (cfg *Config) abs(path string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + // In case cfg.Dir is relative, pass it to filepath.Abs. + return filepath.Abs(filepath.Join(cfg.Dir, path)) +} + func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) // Pass absolute path of directory to go list so that it knows to treat it as a directory, // not a package path. - pattern, err := filepath.Abs(fdir) + pattern, err := state.cfg.abs(fdir) if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } @@ -703,9 +712,8 @@ func (state *golistState) getGoVersion() (int, error) { // getPkgPath finds the package path of a directory if it's relative to a root // directory. func (state *golistState) getPkgPath(dir string) (string, bool, error) { - absDir, err := filepath.Abs(dir) - if err != nil { - return "", false, err + if !filepath.IsAbs(dir) { + panic("non-absolute dir passed to getPkgPath") } roots, err := state.determineRootDirs() if err != nil { @@ -715,7 +723,7 @@ func (state *golistState) getPkgPath(dir string) (string, bool, error) { for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, rdir) { + if !strings.HasPrefix(dir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. 
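
For reference, the new Config.abs helper above only matters when Config.Dir is
itself relative: joining first and then calling filepath.Abs resolves both
components against the process working directory. A standalone sketch with
hypothetical paths, not part of this patch:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // abs mirrors the helper's behavior: absolute paths pass through;
    // relative paths are joined to the config dir and then resolved
    // against the current working directory.
    func abs(cfgDir, path string) (string, error) {
        if filepath.IsAbs(path) {
            return path, nil
        }
        return filepath.Abs(filepath.Join(cfgDir, path))
    }

    func main() {
        p, err := abs("../mymod", "internal/foo")
        fmt.Println(p, err) // e.g. /home/user/mymod/internal/foo <nil>
    }
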
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index d823c474a..d9d5a45cd 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -55,7 +55,7 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error) } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. - absDir, err := filepath.Abs(mod.Dir) + absDir, err := state.cfg.abs(mod.Dir) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index fe713a77b..a5ef8fb40 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -138,7 +138,7 @@ type builder struct { finished int // finished is the length of the prefix of fns containing built functions. // The task of building shared functions within the builder. - // Shared functions are ones the the builder may either create or lookup. + // Shared functions are ones the builder may either create or lookup. // These may be built by other builders in parallel. // The task is done when the builder has finished iterating, and it // waits for all shared functions to finish building. diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go index 05229f06c..ca745d4a1 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_modindex.go +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -15,6 +15,10 @@ import ( // This code is here rather than in the modindex package // to avoid import loops +// TODO(adonovan): this code is only used by a test in this package. +// Can we delete it? Or is there a plan to call NewIndexSource from +// cmd/goimports? + // implements Source using modindex, so only for module cache. // // this is perhaps over-engineered. A new Index is read at first use. @@ -22,8 +26,8 @@ import ( // is read if the index changed. It is not clear the Mutex is needed. type IndexSource struct { modcachedir string - mutex sync.Mutex - ix *modindex.Index + mu sync.Mutex + index *modindex.Index // (access via getIndex) expires time.Time } @@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths } func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { - if err := s.maybeReadIndex(); err != nil { + index, err := s.getIndex() + if err != nil { return nil, err } var cs []modindex.Candidate for pkg, nms := range missing { for nm := range nms { - x := s.ix.Lookup(pkg, nm, false) + x := index.Lookup(pkg, nm, false) cs = append(cs, x...) } } @@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi return ans, nil } -func (s *IndexSource) maybeReadIndex() error { - s.mutex.Lock() - defer s.mutex.Unlock() - - var readIndex bool - if time.Now().After(s.expires) { - ok, err := modindex.Update(s.modcachedir) - if err != nil { - return err - } - if ok { - readIndex = true - } - } +func (s *IndexSource) getIndex() (*modindex.Index, error) { + s.mu.Lock() + defer s.mu.Unlock() - if readIndex || s.ix == nil { - ix, err := modindex.ReadIndex(s.modcachedir) + // (s.index = nil => s.expires is zero, + // so the first condition is strictly redundant. + // But it makes the postcondition very clear.) 
+ if s.index == nil || time.Now().After(s.expires) { + index, err := modindex.Update(s.modcachedir) if err != nil { - return err + return nil, err } - s.ix = ix - // for now refresh every 15 minutes - s.expires = time.Now().Add(time.Minute * 15) + s.index = index + s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period) } + // Inv: s.index != nil - return nil + return s.index, nil } diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go index 2faa6ce0b..9a963744b 100644 --- a/vendor/golang.org/x/tools/internal/modindex/directories.go +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "regexp" - "slices" "strings" "sync" "time" @@ -20,50 +19,48 @@ import ( ) type directory struct { - path Relpath + path string // relative to GOMODCACHE importPath string version string // semantic version - syms []symbol } -// byImportPath groups the directories by import path, -// sorting the ones with the same import path by semantic version, -// most recent first. -func byImportPath(dirs []Relpath) (map[string][]*directory, error) { - ans := make(map[string][]*directory) // key is import path - for _, d := range dirs { - ip, sv, err := DirToImportPathVersion(d) +// bestDirByImportPath returns the best directory for each import +// path, where "best" means most recent semantic version. These import +// paths are inferred from the GOMODCACHE-relative dir names in dirs. +func bestDirByImportPath(dirs []string) (map[string]directory, error) { + dirsByPath := make(map[string]directory) + for _, dir := range dirs { + importPath, version, err := dirToImportPathVersion(dir) if err != nil { return nil, err } - ans[ip] = append(ans[ip], &directory{ - path: d, - importPath: ip, - version: sv, - }) - } - for k, v := range ans { - semanticSort(v) - ans[k] = v + new := directory{ + path: dir, + importPath: importPath, + version: version, + } + if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 { + dirsByPath[importPath] = new + } } - return ans, nil + return dirsByPath, nil } -// sort the directories by semantic version, latest first -func semanticSort(v []*directory) { - slices.SortFunc(v, func(l, r *directory) int { - if n := semver.Compare(l.version, r.version); n != 0 { - return -n // latest first - } - return strings.Compare(string(l.path), string(r.path)) - }) +// compareDirectory defines an ordering of path@version directories, +// by descending version, then by ascending path. +func compareDirectory(x, y directory) int { + if sign := -semver.Compare(x.version, y.version); sign != 0 { + return sign // latest first + } + return strings.Compare(string(x.path), string(y.path)) } // modCacheRegexp splits a relpathpath into module, module version, and package. var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) -// DirToImportPathVersion computes import path and semantic version -func DirToImportPathVersion(dir Relpath) (string, string, error) { +// dirToImportPathVersion computes import path and semantic version +// from a GOMODCACHE-relative directory name. +func dirToImportPathVersion(dir string) (string, string, error) { m := modCacheRegexp.FindStringSubmatch(string(dir)) // m[1] is the module path // m[2] is the version major.minor.patch(-
 that contains the name
+// Package modindex contains code for building and searching an
+// [Index] of the Go module cache.
+package modindex
+
+// The directory containing the index, returned by
+// [IndexDir], contains a file index-name-<ver> that contains the name
 // of the current index. We believe writing that short file is atomic.
-// ReadIndex reads that file to get the file name of the index.
+// [Read] reads that file to get the file name of the index.
 // WriteIndex writes an index with a unique name and then
 // writes that name into a new version of index-name-<ver>.
 // (<ver> stands for the CurrentVersion of the index format.)
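
For illustration only, a minimal sketch of the two-step scheme this comment
describes: write the index under a unique name, then record that name in the
short index-name-<ver> pointer file, whose write is assumed to be atomic. The
file names are invented; this is not the package's actual implementation.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func writeIndexSketch(indexDir string, payload []byte) error {
        // 1. Write the index itself under a unique name.
        f, err := os.CreateTemp(indexDir, "index-*")
        if err != nil {
            return err
        }
        if _, err := f.Write(payload); err != nil {
            f.Close()
            return err
        }
        if err := f.Close(); err != nil {
            return err
        }
        // 2. Record the chosen name in the small pointer file; only this
        // short write needs to be atomic for readers to see a consistent index.
        pointer := filepath.Join(indexDir, "index-name-2") // "2" stands in for CurrentVersion
        return os.WriteFile(pointer, []byte(filepath.Base(f.Name())), 0o666)
    }

    func main() {
        dir, err := os.MkdirTemp("", "modindex-sketch")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)
        fmt.Println(writeIndexSketch(dir, []byte("entries...")))
    }
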
-package modindex
 
 import (
+	"maps"
+	"os"
 	"path/filepath"
 	"slices"
 	"strings"
@@ -21,144 +25,95 @@ import (
 	"golang.org/x/mod/semver"
 )
 
-// Create always creates a new index for the go module cache that is in cachedir.
-func Create(cachedir string) error {
-	_, err := indexModCache(cachedir, true)
-	return err
-}
-
-// Update the index for the go module cache that is in cachedir,
-// If there is no existing index it will build one.
-// If there are changed directories since the last index, it will
-// write a new one and return true. Otherwise it returns false.
-func Update(cachedir string) (bool, error) {
-	return indexModCache(cachedir, false)
+// Update updates the index for the specified Go
+// module cache directory, creating it as needed.
+// On success it returns the current index.
+func Update(gomodcache string) (*Index, error) {
+	prev, err := Read(gomodcache)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		prev = nil
+	}
+	return update(gomodcache, prev)
 }
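
A hypothetical caller of the new Update API. The package is internal to
x/tools, so this compiles only from within that module; it is shown purely to
illustrate the new signature and the exported Index fields used elsewhere in
this patch:

    package main

    import (
        "fmt"
        "os/exec"
        "strings"

        "golang.org/x/tools/internal/modindex"
    )

    func main() {
        out, err := exec.Command("go", "env", "GOMODCACHE").Output()
        if err != nil {
            panic(err)
        }
        gomodcache := strings.TrimSpace(string(out))

        // Update builds or refreshes the index and returns the current one.
        ix, err := modindex.Update(gomodcache)
        if err != nil {
            panic(err)
        }
        fmt.Printf("index of %s valid at %v with %d entries\n",
            ix.GOMODCACHE, ix.ValidAt, len(ix.Entries))
    }
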
 
-// indexModCache writes an index current as of when it is called.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and the updates to the cache. It returns true if it wrote an index,
-// false otherwise.
-func indexModCache(cachedir string, clear bool) (bool, error) {
-	cachedir, err := filepath.Abs(cachedir)
+// update builds, writes, and returns the current index.
+//
+// If old is nil, the new index is built from all of GOMODCACHE;
+// otherwise it is built from the old index plus cache updates
+// since the previous index's time.
+func update(gomodcache string, old *Index) (*Index, error) {
+	gomodcache, err := filepath.Abs(gomodcache)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	cd := Abspath(cachedir)
-	future := time.Now().Add(24 * time.Hour) // safely in the future
-	ok, err := modindexTimed(future, cd, clear)
+	new, changed, err := build(gomodcache, old)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	return ok, nil
-}
-
-// modindexTimed writes an index current as of onlyBefore.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and all the updates to the cache before onlyBefore.
-// It returns true if it wrote a new index, false if it wrote nothing.
-func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
-	var curIndex *Index
-	if !clear {
-		var err error
-		curIndex, err = ReadIndex(string(cachedir))
-		if clear && err != nil {
-			return false, err
+	if old == nil || changed {
+		if err := write(gomodcache, new); err != nil {
+			return nil, err
 		}
-		// TODO(pjw): check that most of those directories still exist
-	}
-	cfg := &work{
-		onlyBefore: onlyBefore,
-		oldIndex:   curIndex,
-		cacheDir:   cachedir,
-	}
-	if curIndex != nil {
-		cfg.onlyAfter = curIndex.Changed
-	}
-	if err := cfg.buildIndex(); err != nil {
-		return false, err
 	}
-	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
-		// no changes from existing curIndex, don't write a new index
-		return false, nil
-	}
-	if err := cfg.writeIndex(); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-type work struct {
-	onlyBefore time.Time // do not use directories later than this
-	onlyAfter  time.Time // only interested in directories after this
-	// directories from before onlyAfter come from oldIndex
-	oldIndex *Index
-	newIndex *Index
-	cacheDir Abspath
+	return new, nil
 }
 
-func (w *work) buildIndex() error {
-	// The effective date of the new index should be at least
-	// slightly earlier than when the directories are scanned
-	// so set it now.
-	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
-	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
-	if len(dirs) == 0 {
-		return nil
+// build returns a new index for the specified Go module cache (an
+// absolute path).
+//
+// If an old index is provided, only directories more recent than it
+// are scanned; older directories are provided by the old
+// Index.
+//
+// The boolean result indicates whether new entries were found.
+func build(gomodcache string, old *Index) (*Index, bool, error) {
+	// Set the time window.
+	var start time.Time // = dawn of time
+	if old != nil {
+		start = old.ValidAt
 	}
-	newdirs, err := byImportPath(dirs)
+	now := time.Now()
+	end := now.Add(24 * time.Hour) // safely in the future
+
+	// Enumerate GOMODCACHE package directories.
+	// Choose the best (latest) package for each import path.
+	pkgDirs := findDirs(gomodcache, start, end)
+	dirByPath, err := bestDirByImportPath(pkgDirs)
 	if err != nil {
-		return err
+		return nil, false, err
 	}
-	// for each import path it might occur only in newdirs,
-	// only in w.oldIndex, or in both.
-	// If it occurs in both, use the semantically later one
-	if w.oldIndex != nil {
-		for _, e := range w.oldIndex.Entries {
-			found, ok := newdirs[e.ImportPath]
-			if !ok {
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				continue // use this one, there is no new one
-			}
-			if semver.Compare(found[0].version, e.Version) > 0 {
-				// use the new one
-			} else {
-				// use the old one, forget the new one
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				delete(newdirs, e.ImportPath)
+
+	// Each import path might occur only in
+	// dirByPath, only in old, or in both.
+	// If both, use the semantically later one.
+	var entries []Entry
+	if old != nil {
+		for _, entry := range old.Entries {
+			dir, ok := dirByPath[entry.ImportPath]
+			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
+				// New dir is missing or not more recent; use old entry.
+				entries = append(entries, entry)
+				delete(dirByPath, entry.ImportPath)
 			}
 		}
 	}
-	// get symbol information for all the new diredtories
-	getSymbols(w.cacheDir, newdirs)
-	// assemble the new index entries
-	for k, v := range newdirs {
-		d := v[0]
-		pkg, names := processSyms(d.syms)
-		if pkg == "" {
-			continue // PJW: does this ever happen?
-		}
-		entry := Entry{
-			PkgName:    pkg,
-			Dir:        d.path,
-			ImportPath: k,
-			Version:    d.version,
-			Names:      names,
-		}
-		w.newIndex.Entries = append(w.newIndex.Entries, entry)
-	}
-	// sort the entries in the new index
-	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
-		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+
+	// Extract symbol information for all the new directories.
+	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
+	entries = append(entries, newEntries...)
+	slices.SortFunc(entries, func(x, y Entry) int {
+		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
 			return n
 		}
-		return strings.Compare(l.ImportPath, r.ImportPath)
+		return strings.Compare(x.ImportPath, y.ImportPath)
 	})
-	return nil
-}
 
-func (w *work) writeIndex() error {
-	return writeIndex(w.cacheDir, w.newIndex)
+	return &Index{
+		GOMODCACHE: gomodcache,
+		ValidAt:    now, // time before the directories were scanned
+		Entries:    entries,
+	}, len(newEntries) > 0, nil
 }
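
The old-versus-new merge in build reduces to the rule stated in its comment:
keep an old entry unless a strictly newer semantic version of the same import
path was scanned. A self-contained sketch of that rule with invented types and
data:

    package main

    import (
        "fmt"

        "golang.org/x/mod/semver"
    )

    type entry struct{ importPath, version string }

    // merge keeps old entries unless scanned holds a strictly newer version,
    // then appends whatever scanned paths remain (new or newer packages).
    func merge(old []entry, scanned map[string]string) []entry {
        var merged []entry
        for _, e := range old {
            v, ok := scanned[e.importPath]
            if !ok || semver.Compare(v, e.version) <= 0 {
                merged = append(merged, e) // no newer directory was scanned
                delete(scanned, e.importPath)
            }
        }
        for ip, v := range scanned {
            merged = append(merged, entry{ip, v})
        }
        return merged
    }

    func main() {
        old := []entry{{"example.com/a", "v1.2.0"}, {"example.com/b", "v0.9.0"}}
        scanned := map[string]string{"example.com/a": "v1.3.0", "example.com/c": "v2.0.0"}
        fmt.Println(merge(old, scanned))
        // b is kept from old; a is replaced by v1.3.0; c is added (map order varies)
    }
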
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
index 31a502c58..fe24db9b1 100644
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -10,11 +10,13 @@ import (
 	"go/parser"
 	"go/token"
 	"go/types"
+	"iter"
 	"os"
 	"path/filepath"
 	"runtime"
 	"slices"
 	"strings"
+	"sync"
 
 	"golang.org/x/sync/errgroup"
 )
@@ -34,41 +36,65 @@ type symbol struct {
 	sig  string // signature information, for F
 }
 
-// find the symbols for the best directories
-func getSymbols(cd Abspath, dirs map[string][]*directory) {
+// extractSymbols returns a (new, unordered) array of Entries, one for
+// each provided package directory, describing its exported symbols.
+func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
+	var (
+		mu      sync.Mutex
+		entries []Entry
+	)
+
 	var g errgroup.Group
 	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
-	for _, vv := range dirs {
-		// throttling some day?
-		d := vv[0]
+	for dir := range dirs {
 		g.Go(func() error {
-			thedir := filepath.Join(string(cd), string(d.path))
+			thedir := filepath.Join(cwd, string(dir.path))
 			mode := parser.SkipObjectResolution | parser.ParseComments
 
-			fi, err := os.ReadDir(thedir)
+			// Parse all Go files in dir and extract symbols.
+			dirents, err := os.ReadDir(thedir)
 			if err != nil {
 				return nil // log this someday?
 			}
-			for _, fx := range fi {
-				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+			var syms []symbol
+			for _, dirent := range dirents {
+				if !strings.HasSuffix(dirent.Name(), ".go") ||
+					strings.HasSuffix(dirent.Name(), "_test.go") {
 					continue
 				}
-				fname := filepath.Join(thedir, fx.Name())
+				fname := filepath.Join(thedir, dirent.Name())
 				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
 				if err != nil {
 					continue // ignore errors, someday log them?
 				}
-				d.syms = append(d.syms, getFileExports(tr)...)
+				syms = append(syms, getFileExports(tr)...)
+			}
+
+			// Create an entry for the package.
+			pkg, names := processSyms(syms)
+			if pkg != "" {
+				mu.Lock()
+				defer mu.Unlock()
+				entries = append(entries, Entry{
+					PkgName:    pkg,
+					Dir:        dir.path,
+					ImportPath: dir.importPath,
+					Version:    dir.version,
+					Names:      names,
+				})
 			}
+
 			return nil
 		})
 	}
-	g.Wait()
+	g.Wait() // ignore error
+
+	return entries
 }
 
 func getFileExports(f *ast.File) []symbol {
 	pkg := f.Name.Name
-	if pkg == "main" {
+	if pkg == "main" || pkg == "" {
 		return nil
 	}
 	var ans []symbol
@@ -202,17 +228,18 @@ func processSyms(syms []symbol) (string, []string) {
 	pkg := syms[0].pkg
 	var names []string
 	for _, s := range syms {
+		if s.pkg != pkg {
+			// Symbols came from two files in the same dir
+			// with different package declarations.
+			continue
+		}
 		var nx string
-		if s.pkg == pkg {
-			if s.sig != "" {
-				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
-			} else {
-				nx = fmt.Sprintf("%s %s", s.name, s.kind)
-			}
-			names = append(names, nx)
+		if s.sig != "" {
+			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
 		} else {
-			continue // PJW: do we want to keep track of these?
+			nx = fmt.Sprintf("%s %s", s.name, s.kind)
 		}
+		names = append(names, nx)
 	}
 	return pkg, names
 }
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
deleted file mode 100644
index ece448863..000000000
--- a/vendor/golang.org/x/tools/internal/modindex/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"strings"
-)
-
-// some special types to avoid confusions
-
-// distinguish various types of directory names. It's easy to get confused.
-type Abspath string // absolute paths
-type Relpath string // paths with GOMODCACHE prefix removed
-
-func toRelpath(cachedir Abspath, s string) Relpath {
-	if strings.HasPrefix(s, string(cachedir)) {
-		if s == string(cachedir) {
-			return Relpath("")
-		}
-		return Relpath(s[len(cachedir)+1:])
-	}
-	return Relpath(s)
-}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 73eefa2a7..929b470be 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,6 +5,8 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
+import "fmt"
+
 var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
@@ -13,5 +15,9 @@ type PackageError struct {
 	Err         string   // the error itself
 }
 
+func (err PackageError) String() string {
+	return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
+}
+
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
index 0d7823b3c..d88162ff5 100644
--- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
+++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
@@ -70,12 +70,14 @@ func CreatePatch(a, b []byte) ([]Operation, error) {
 	}
 	var aI interface{}
 	var bI interface{}
-	err := json.Unmarshal(a, &aI)
-	if err != nil {
+	aDec := json.NewDecoder(bytes.NewReader(a))
+	aDec.UseNumber()
+	if err := aDec.Decode(&aI); err != nil {
 		return nil, errBadJSONDoc
 	}
-	err = json.Unmarshal(b, &bI)
-	if err != nil {
+	bDec := json.NewDecoder(bytes.NewReader(b))
+	bDec.UseNumber()
+	if err := bDec.Decode(&bI); err != nil {
 		return nil, errBadJSONDoc
 	}
 	return handleValues(aI, bI, "", []Operation{})
@@ -94,6 +96,11 @@ func matchesValue(av, bv interface{}) bool {
 		if ok && bt == at {
 			return true
 		}
+	case json.Number:
+		bt, ok := bv.(json.Number)
+		if ok && bt == at {
+			return true
+		}
 	case float64:
 		bt, ok := bv.(float64)
 		if ok && bt == at {
@@ -212,7 +219,7 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation,
 		if err != nil {
 			return nil, err
 		}
-	case string, float64, bool:
+	case string, float64, bool, json.Number:
 		if !matchesValue(av, bv) {
 			patch = append(patch, NewOperation("replace", p, bv))
 		}
diff --git a/vendor/honnef.co/go/tools/analysis/code/code.go b/vendor/honnef.co/go/tools/analysis/code/code.go
index 7d20ea4c9..e456947fa 100644
--- a/vendor/honnef.co/go/tools/analysis/code/code.go
+++ b/vendor/honnef.co/go/tools/analysis/code/code.go
@@ -178,7 +178,7 @@ func CallName(pass *analysis.Pass, call *ast.CallExpr) string {
 		fun = idx.X
 	}
 
-	// (foo)[T] is not a valid instantiationg, so no need to unparen again.
+	// (foo)[T] is not a valid instantiation, so no need to unparen again.
 
 	switch fun := fun.(type) {
 	case *ast.SelectorExpr:
@@ -525,7 +525,7 @@ func StdlibVersion(pass *analysis.Pass, node Positioner) string {
 			// Do note that strictly speaking we're conflating the Go version and the
 			// module version in our check. Nothing is stopping a user from using Go 1.17
 			// (which didn't implement the new rules for versions in go.mod) to build a Go
-			// 1.22 module, in which case a file tagged with go1.17 will not have acces to the 1.22
+			// 1.22 module, in which case a file tagged with go1.17 will not have access to the 1.22
 			// standard library. However, we believe that if a module requires 1.21 or
 			// newer, then the author clearly expects the new behavior, and doesn't care
 			// for the old one. Otherwise they would've specified an older version.
diff --git a/vendor/honnef.co/go/tools/go/ir/builder.go b/vendor/honnef.co/go/tools/go/ir/builder.go
index 192eaaa50..fb6183a50 100644
--- a/vendor/honnef.co/go/tools/go/ir/builder.go
+++ b/vendor/honnef.co/go/tools/go/ir/builder.go
@@ -895,7 +895,7 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
 // returns the effective receiver after applying the implicit field
 // selections of sel.
 //
-// wantAddr requests that the result is an an address.  If
+// wantAddr requests that the result is an address.  If
 // !sel.Indirect(), this may require that e be built in addr() mode; it
 // must thus be addressable.
 //
@@ -2058,7 +2058,7 @@ func (b *builder) forStmtGo122(fn *Function, s *ast.ForStmt, label *lblock) {
 		fn.emit(phi, lhs)
 
 		fn.currentBlock = post
-		// If next is is local, it reuses the address and zeroes the old value so
+		// If next is local, it reuses the address and zeroes the old value so
 		// load before allocating next.
 		load := emitLoad(fn, phi, init)
 		next := emitLocal(fn, v.Type(), lhs, v.Name())
@@ -3153,7 +3153,7 @@ func (b *builder) buildFunction(fn *Function) {
 func (b *builder) buildYieldFunc(fn *Function) {
 	// See builder.rangeFunc for detailed documentation on how fn is set up.
 	//
-	// In psuedo-Go this roughly builds:
+	// In pseudo-Go this roughly builds:
 	// func yield(_k tk, _v tv) bool {
 	//         if jump != READY { panic("yield function called after range loop exit") }
 	//     jump = BUSY
diff --git a/vendor/honnef.co/go/tools/go/ir/emit.go b/vendor/honnef.co/go/tools/go/ir/emit.go
index e4304d10a..b04852d4e 100644
--- a/vendor/honnef.co/go/tools/go/ir/emit.go
+++ b/vendor/honnef.co/go/tools/go/ir/emit.go
@@ -156,7 +156,7 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast
 }
 
 // emitCompare emits to f code compute the boolean result of
-// comparison comparison 'x op y'.
+// comparison 'x op y'.
 func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value {
 	xt := x.Type().Underlying()
 	yt := y.Type().Underlying()
diff --git a/vendor/honnef.co/go/tools/knowledge/deprecated.go b/vendor/honnef.co/go/tools/knowledge/deprecated.go
index 854e5bd1f..f7dd4fce0 100644
--- a/vendor/honnef.co/go/tools/knowledge/deprecated.go
+++ b/vendor/honnef.co/go/tools/knowledge/deprecated.go
@@ -164,7 +164,7 @@ var StdlibDeprecations = map[string]Deprecation{
 	"syscall.GetQueuedCompletionStatus":     {"go1.17", "go1.0"},
 	"syscall.CreateIoCompletionPort":        {"go1.17", "go1.0"},
 
-	// We choose to only track the package itself, even though all functions are derecated individually, too. Anyone
+	// We choose to only track the package itself, even though all functions are deprecated individually, too. Anyone
 	// using ioutil directly will have to import it, and this keeps the noise down.
 	"io/ioutil": {"go1.19", "go1.19"},
 
@@ -209,6 +209,14 @@ var StdlibDeprecations = map[string]Deprecation{
 	"go/ast.Scope":                {"go1.22", "go1.0"},
 	"html/template.ErrJSTemplate": {"go1.22", DeprecatedUseNoLonger},
 	"reflect.PtrTo":               {"go1.22", "go1.18"},
+
+	// Technically, runtime.GOROOT could be considered DeprecatedNeverUse, but
+	// using it used to be a lot more common and accepted.
+	"runtime.GOROOT": {"go1.24", DeprecatedUseNoLonger},
+	// These are never safe to use; a concrete alternative was added in Go 1.2 (crypto/cipher.AEAD).
+	"crypto/cipher.NewCFBDecrypter": {"go1.24", "go1.2"},
+	"crypto/cipher.NewCFBEncrypter": {"go1.24", "go1.2"},
+	"crypto/cipher.NewOFB":          {"go1.24", "go1.2"},
 }
 
-// Last imported from Go at e8ee1dc4f9e2632ba1018610d1a1187743ae397f
+// Last imported from GOROOT/api/go1.24.txt at fadfe2fc80f6b37e99b3e7aa068112ff539717c9.
diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go
index 4fb4f8ec6..3c4b5ffb8 100644
--- a/vendor/honnef.co/go/tools/pattern/match.go
+++ b/vendor/honnef.co/go/tools/pattern/match.go
@@ -692,7 +692,8 @@ func (texpr TrulyConstantExpression) Match(m *Matcher, node interface{}) (interf
 
 var (
 	// Types of fields in go/ast structs that we want to skip
-	rtTokPos       = reflect.TypeOf(token.Pos(0))
+	rtTokPos = reflect.TypeOf(token.Pos(0))
+	//lint:ignore SA1019 It's deprecated, but we still want to skip the field.
 	rtObject       = reflect.TypeOf((*ast.Object)(nil))
 	rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
 )
diff --git a/vendor/honnef.co/go/tools/simple/s1008/s1008.go b/vendor/honnef.co/go/tools/simple/s1008/s1008.go
index 98cd00dac..e945e3f7f 100644
--- a/vendor/honnef.co/go/tools/simple/s1008/s1008.go
+++ b/vendor/honnef.co/go/tools/simple/s1008/s1008.go
@@ -5,6 +5,7 @@ import (
 	"go/ast"
 	"go/constant"
 	"go/token"
+	"strings"
 
 	"honnef.co/go/tools/analysis/code"
 	"honnef.co/go/tools/analysis/facts/generated"
@@ -42,8 +43,14 @@ var (
 	checkIfReturnQRet = pattern.MustParse(`(ReturnStmt [ret@(Builtin (Or "true" "false"))])`)
 )
 
-func run(pass *analysis.Pass) (interface{}, error) {
+func run(pass *analysis.Pass) (any, error) {
+	var cm ast.CommentMap
 	fn := func(node ast.Node) {
+		if f, ok := node.(*ast.File); ok {
+			cm = ast.NewCommentMap(pass.Fset, f, f.Comments)
+			return
+		}
+
 		block := node.(*ast.BlockStmt)
 		l := len(block.List)
 		if l < 2 {
@@ -76,13 +83,35 @@ func run(pass *analysis.Pass) (interface{}, error) {
 
 		ret1 := m1.State["ret"].(*ast.Ident)
 		ret2 := m2.State["ret"].(*ast.Ident)
-
 		if ret1.Name == ret2.Name {
 			// we want the function to return true and false, not the
 			// same value both times.
 			return
 		}
 
+		hasComments := func(n ast.Node) bool {
+			cmf := cm.Filter(n)
+			for _, groups := range cmf {
+				for _, group := range groups {
+					for _, cmt := range group.List {
+						if strings.HasPrefix(cmt.Text, "//@ diag") {
+							// Staticcheck test cases use comments to mark
+							// expected diagnostics. Ignore these comments so we
+							// can test this check.
+							continue
+						}
+						return true
+					}
+				}
+			}
+			return false
+		}
+
+		// Don't flag if either branch is commented
+		if hasComments(n1) || hasComments(n2) {
+			return
+		}
+
 		cond := m1.State["cond"].(ast.Expr)
 		origCond := cond
 		if ret1.Name == "false" {
@@ -94,7 +123,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
 				report.Render(pass, origCond), report.Render(pass, ret1), report.Render(pass, ret2)),
 			report.FilterGenerated())
 	}
-	code.Preorder(pass, fn, (*ast.BlockStmt)(nil))
+	code.Preorder(pass, fn, (*ast.File)(nil), (*ast.BlockStmt)(nil))
 	return nil, nil
 }
 
diff --git a/vendor/honnef.co/go/tools/simple/s1009/s1009.go b/vendor/honnef.co/go/tools/simple/s1009/s1009.go
index afe5c3c14..270215c74 100644
--- a/vendor/honnef.co/go/tools/simple/s1009/s1009.go
+++ b/vendor/honnef.co/go/tools/simple/s1009/s1009.go
@@ -41,6 +41,8 @@ var Analyzer = SCAnalyzer.Analyzer
 // run checks for the following redundant nil-checks:
 //
 //	if x == nil || len(x) == 0 {}
+//	if x == nil || len(x) < N {} (where N != 0)
+//	if x == nil || len(x) <= N {}
 //	if x != nil && len(x) != 0 {}
 //	if x != nil && len(x) == N {} (where N != 0)
 //	if x != nil && len(x) > N {}
@@ -99,9 +101,6 @@ func run(pass *analysis.Pass) (interface{}, error) {
 		if !ok {
 			return
 		}
-		if eqNil && y.Op != token.EQL { // must be len(xx) *==* 0
-			return
-		}
 		yx, ok := y.X.(*ast.CallExpr)
 		if !ok {
 			return
@@ -122,15 +121,31 @@ func run(pass *analysis.Pass) (interface{}, error) {
 			return
 		}
 
-		if eqNil && !code.IsIntegerLiteral(pass, y.Y, constant.MakeInt64(0)) { // must be len(x) == *0*
+		isConst, isZero := isConstZero(y.Y)
+		if !isConst {
 			return
 		}
 
-		if !eqNil {
-			isConst, isZero := isConstZero(y.Y)
-			if !isConst {
+		if eqNil {
+			switch y.Op {
+			case token.EQL:
+				// avoid false positive for "xx == nil || len(xx) == N" (where N != 0)
+				if !isZero {
+					return
+				}
+			case token.LEQ:
+				// ok
+			case token.LSS:
+				// avoid false positive for "xx == nil || len(xx) < 0"
+				if isZero {
+					return
+				}
+			default:
 				return
 			}
+		}
+
+		if !eqNil {
 			switch y.Op {
 			case token.EQL:
 				// avoid false positive for "xx != nil && len(xx) == 0"
@@ -157,13 +172,17 @@ func run(pass *analysis.Pass) (interface{}, error) {
 		// finally check that xx type is one of array, slice, map or chan
 		// this is to prevent false positive in case if xx is a pointer to an array
 		typ := pass.TypesInfo.TypeOf(xx)
+		var nilType string
 		ok = typeutil.All(typ, func(term *types.Term) bool {
 			switch term.Type().Underlying().(type) {
 			case *types.Slice:
+				nilType = "nil slices"
 				return true
 			case *types.Map:
+				nilType = "nil maps"
 				return true
 			case *types.Chan:
+				nilType = "nil channels"
 				return true
 			case *types.Pointer:
 				return false
@@ -178,7 +197,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
 			return
 		}
 
-		report.Report(pass, expr, fmt.Sprintf("should omit nil check; len() for %s is defined as zero", typ), report.FilterGenerated())
+		report.Report(pass, expr, fmt.Sprintf("should omit nil check; len() for %s is defined as zero", nilType), report.FilterGenerated())
 	}
 	code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
 	return nil, nil
diff --git a/vendor/honnef.co/go/tools/staticcheck/sa4023/sa4023.go b/vendor/honnef.co/go/tools/staticcheck/sa4023/sa4023.go
index 991d4f819..6546ffa76 100644
--- a/vendor/honnef.co/go/tools/staticcheck/sa4023/sa4023.go
+++ b/vendor/honnef.co/go/tools/staticcheck/sa4023/sa4023.go
@@ -79,7 +79,7 @@ Similar situations to those described here can arise whenever
 interfaces are used. Just keep in mind that if any concrete value
 has been stored in the interface, the interface will not be nil.
 For more information, see The Laws of
-Reflection (https://golang.org/doc/articles/laws_of_reflection.html).
+Reflection at https://golang.org/doc/articles/laws_of_reflection.html.
 
 This text has been copied from
 https://golang.org/doc/faq#nil_error, licensed under the Creative
diff --git a/vendor/honnef.co/go/tools/staticcheck/sa9007/sa9007.go b/vendor/honnef.co/go/tools/staticcheck/sa9007/sa9007.go
index d10e5e7e8..685e1e134 100644
--- a/vendor/honnef.co/go/tools/staticcheck/sa9007/sa9007.go
+++ b/vendor/honnef.co/go/tools/staticcheck/sa9007/sa9007.go
@@ -24,7 +24,7 @@ var SCAnalyzer = lint.InitializeAnalyzer(&lint.Analyzer{
 		Text: `
 It is virtually never correct to delete system directories such as
 /tmp or the user's home directory. However, it can be fairly easy to
-do by mistake, for example by mistakingly using \'os.TempDir\' instead
+do by mistake, for example by mistakenly using \'os.TempDir\' instead
 of \'ioutil.TempDir\', or by forgetting to add a suffix to the result
 of \'os.UserHomeDir\'.
 
diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go
index 46c147e1a..e05cef0a9 100644
--- a/vendor/honnef.co/go/tools/unused/unused.go
+++ b/vendor/honnef.co/go/tools/unused/unused.go
@@ -94,6 +94,7 @@ This overview is true when using the default options. Different options may chan
   - (6.3) embedded fields that help implement interfaces (either fully implements it, or contributes required methods) (recursively)
   - (6.4) embedded fields that have exported methods (recursively)
   - (6.5) embedded structs that have exported fields (recursively)
+  - (6.6) all fields of structs that have a structs.HostLayout field
 
 - (7.1) field accesses use fields
 - (7.2) fields use their types
@@ -887,7 +888,7 @@ func (g *graph) read(node ast.Node, by types.Object) {
 			g.read(arg, by)
 		}
 
-		// Handle conversiosn
+		// Handle conversions
 		conv := node
 		if len(conv.Args) != 1 || conv.Ellipsis.IsValid() {
 			return
@@ -1496,8 +1497,11 @@ func (g *graph) namedType(typ *types.TypeName, spec ast.Expr) {
 	// (2.2) named types use the type they're based on
 
 	if st, ok := spec.(*ast.StructType); ok {
-		// Named structs are special in that its unexported fields are only used if they're being written to. That is,
-		// the fields are not used by the named type itself, nor are the types of the fields.
+		var hasHostLayout bool
+
+		// Named structs are special in that their unexported fields are only
+		// used if they're being written to. That is, the fields are not used by
+		// the named type itself, nor are the types of the fields.
 		for _, field := range st.Fields.List {
 			seen := map[*types.Struct]struct{}{}
 			// For `type x struct { *x; F int }`, don't visit the embedded x
@@ -1558,6 +1562,40 @@ func (g *graph) namedType(typ *types.TypeName, spec ast.Expr) {
 				}
 			}
 
+			// (6.6) if the struct has a field of type structs.HostLayout, then
+			// this signals that all fields are relevant to match some
+			// externally specified memory layout.
+			//
+			// This augments the 5.2 heuristic of using all fields when
+			// converting via unsafe.Pointer. For example, 5.2 doesn't currently
+			// handle conversions involving more than one level of pointer
+			// indirection (although it probably should). Another example that
+			// doesn't involve the use of unsafe at all is exporting symbols for
+			// use by C libraries.
+			//
+			// The actual requirements for the use of structs.HostLayout fields
+			// haven't been determined yet. It's an open question whether named
+			// types of underlying type structs.HostLayout, aliases of it,
+			// generic instantiations, or embedding structs that themselves
+			// contain a HostLayout field count as valid uses of the marker (see
+			// https://golang.org/issues/66408#issuecomment-2120644459)
+			//
+			// For now, we require a struct to have a field of type
+			// structs.HostLayout or an alias of it, where the field itself may
+			// be embedded. We don't handle fields whose types are type
+			// parameters.
+			fieldType := types.Unalias(g.info.TypeOf(field.Type))
+			if fieldType, ok := fieldType.(*types.Named); ok {
+				obj := fieldType.Obj()
+				if obj.Name() == "HostLayout" && obj.Pkg().Path() == "structs" {
+					hasHostLayout = true
+				}
+			}
+		}
+
+		// For 6.6.
+		if hasHostLayout {
+			g.useAllFieldsRecursively(typ.Type(), typ)
 		}
 	} else {
 		g.read(spec, typ)
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index 261ae41bd..bf1ae5948 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -25,6 +25,7 @@ import (
 	io "io"
 
 	proto "github.com/gogo/protobuf/proto"
+	k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
 	v11 "k8s.io/api/admissionregistration/v1"
 	k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,10 +47,38 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
+func (m *ApplyConfiguration) Reset()      { *m = ApplyConfiguration{} }
+func (*ApplyConfiguration) ProtoMessage() {}
+func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{0}
+}
+func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ApplyConfiguration.Merge(m, src)
+}
+func (m *ApplyConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *ApplyConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
+
 func (m *AuditAnnotation) Reset()      { *m = AuditAnnotation{} }
 func (*AuditAnnotation) ProtoMessage() {}
 func (*AuditAnnotation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{0}
+	return fileDescriptor_7f7c65a4f012fb19, []int{1}
 }
 func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -77,7 +106,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
 func (m *ExpressionWarning) Reset()      { *m = ExpressionWarning{} }
 func (*ExpressionWarning) ProtoMessage() {}
 func (*ExpressionWarning) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{1}
+	return fileDescriptor_7f7c65a4f012fb19, []int{2}
 }
 func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -102,10 +131,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
 
+func (m *JSONPatch) Reset()      { *m = JSONPatch{} }
+func (*JSONPatch) ProtoMessage() {}
+func (*JSONPatch) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{3}
+}
+func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *JSONPatch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_JSONPatch.Merge(m, src)
+}
+func (m *JSONPatch) XXX_Size() int {
+	return m.Size()
+}
+func (m *JSONPatch) XXX_DiscardUnknown() {
+	xxx_messageInfo_JSONPatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
+
 func (m *MatchCondition) Reset()      { *m = MatchCondition{} }
 func (*MatchCondition) ProtoMessage() {}
 func (*MatchCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{2}
+	return fileDescriptor_7f7c65a4f012fb19, []int{4}
 }
 func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -133,7 +190,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
 func (m *MatchResources) Reset()      { *m = MatchResources{} }
 func (*MatchResources) ProtoMessage() {}
 func (*MatchResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{3}
+	return fileDescriptor_7f7c65a4f012fb19, []int{5}
 }
 func (m *MatchResources) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -158,10 +215,178 @@ func (m *MatchResources) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MatchResources proto.InternalMessageInfo
 
+func (m *MutatingAdmissionPolicy) Reset()      { *m = MutatingAdmissionPolicy{} }
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{6}
+}
+func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBinding) Reset()      { *m = MutatingAdmissionPolicyBinding{} }
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{7}
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingList) Reset()      { *m = MutatingAdmissionPolicyBindingList{} }
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{8}
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyBindingSpec) Reset()      { *m = MutatingAdmissionPolicyBindingSpec{} }
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{9}
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicyList) Reset()      { *m = MutatingAdmissionPolicyList{} }
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{10}
+}
+func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicyList) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
+
+func (m *MutatingAdmissionPolicySpec) Reset()      { *m = MutatingAdmissionPolicySpec{} }
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{11}
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
+}
+func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
+
 func (m *MutatingWebhook) Reset()      { *m = MutatingWebhook{} }
 func (*MutatingWebhook) ProtoMessage() {}
 func (*MutatingWebhook) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{4}
+	return fileDescriptor_7f7c65a4f012fb19, []int{12}
 }
 func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -189,7 +414,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
 func (m *MutatingWebhookConfiguration) Reset()      { *m = MutatingWebhookConfiguration{} }
 func (*MutatingWebhookConfiguration) ProtoMessage() {}
 func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{5}
+	return fileDescriptor_7f7c65a4f012fb19, []int{13}
 }
 func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -217,7 +442,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
 func (m *MutatingWebhookConfigurationList) Reset()      { *m = MutatingWebhookConfigurationList{} }
 func (*MutatingWebhookConfigurationList) ProtoMessage() {}
 func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{6}
+	return fileDescriptor_7f7c65a4f012fb19, []int{14}
 }
 func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -242,10 +467,38 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
 
+func (m *Mutation) Reset()      { *m = Mutation{} }
+func (*Mutation) ProtoMessage() {}
+func (*Mutation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7f7c65a4f012fb19, []int{15}
+}
+func (m *Mutation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Mutation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Mutation.Merge(m, src)
+}
+func (m *Mutation) XXX_Size() int {
+	return m.Size()
+}
+func (m *Mutation) XXX_DiscardUnknown() {
+	xxx_messageInfo_Mutation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Mutation proto.InternalMessageInfo
+
 func (m *NamedRuleWithOperations) Reset()      { *m = NamedRuleWithOperations{} }
 func (*NamedRuleWithOperations) ProtoMessage() {}
 func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{7}
+	return fileDescriptor_7f7c65a4f012fb19, []int{16}
 }
 func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -273,7 +526,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
 func (m *ParamKind) Reset()      { *m = ParamKind{} }
 func (*ParamKind) ProtoMessage() {}
 func (*ParamKind) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{8}
+	return fileDescriptor_7f7c65a4f012fb19, []int{17}
 }
 func (m *ParamKind) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -301,7 +554,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
 func (m *ParamRef) Reset()      { *m = ParamRef{} }
 func (*ParamRef) ProtoMessage() {}
 func (*ParamRef) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{9}
+	return fileDescriptor_7f7c65a4f012fb19, []int{18}
 }
 func (m *ParamRef) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -329,7 +582,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
 func (m *ServiceReference) Reset()      { *m = ServiceReference{} }
 func (*ServiceReference) ProtoMessage() {}
 func (*ServiceReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{10}
+	return fileDescriptor_7f7c65a4f012fb19, []int{19}
 }
 func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -357,7 +610,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
 func (m *TypeChecking) Reset()      { *m = TypeChecking{} }
 func (*TypeChecking) ProtoMessage() {}
 func (*TypeChecking) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{11}
+	return fileDescriptor_7f7c65a4f012fb19, []int{20}
 }
 func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -385,7 +638,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicy) Reset()      { *m = ValidatingAdmissionPolicy{} }
 func (*ValidatingAdmissionPolicy) ProtoMessage() {}
 func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{12}
+	return fileDescriptor_7f7c65a4f012fb19, []int{21}
 }
 func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -413,7 +666,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyBinding) Reset()      { *m = ValidatingAdmissionPolicyBinding{} }
 func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{13}
+	return fileDescriptor_7f7c65a4f012fb19, []int{22}
 }
 func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -441,7 +694,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyBindingList) Reset()      { *m = ValidatingAdmissionPolicyBindingList{} }
 func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{14}
+	return fileDescriptor_7f7c65a4f012fb19, []int{23}
 }
 func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -469,7 +722,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
 func (m *ValidatingAdmissionPolicyBindingSpec) Reset()      { *m = ValidatingAdmissionPolicyBindingSpec{} }
 func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{15}
+	return fileDescriptor_7f7c65a4f012fb19, []int{24}
 }
 func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -497,7 +750,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
 func (m *ValidatingAdmissionPolicyList) Reset()      { *m = ValidatingAdmissionPolicyList{} }
 func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{16}
+	return fileDescriptor_7f7c65a4f012fb19, []int{25}
 }
 func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -525,7 +778,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicySpec) Reset()      { *m = ValidatingAdmissionPolicySpec{} }
 func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
 func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{17}
+	return fileDescriptor_7f7c65a4f012fb19, []int{26}
 }
 func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -553,7 +806,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
 func (m *ValidatingAdmissionPolicyStatus) Reset()      { *m = ValidatingAdmissionPolicyStatus{} }
 func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
 func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{18}
+	return fileDescriptor_7f7c65a4f012fb19, []int{27}
 }
 func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -581,7 +834,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
 func (m *ValidatingWebhook) Reset()      { *m = ValidatingWebhook{} }
 func (*ValidatingWebhook) ProtoMessage() {}
 func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{19}
+	return fileDescriptor_7f7c65a4f012fb19, []int{28}
 }
 func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -609,7 +862,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
 func (m *ValidatingWebhookConfiguration) Reset()      { *m = ValidatingWebhookConfiguration{} }
 func (*ValidatingWebhookConfiguration) ProtoMessage() {}
 func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{20}
+	return fileDescriptor_7f7c65a4f012fb19, []int{29}
 }
 func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -637,7 +890,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
 func (m *ValidatingWebhookConfigurationList) Reset()      { *m = ValidatingWebhookConfigurationList{} }
 func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
 func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{21}
+	return fileDescriptor_7f7c65a4f012fb19, []int{30}
 }
 func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -665,7 +918,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
 func (m *Validation) Reset()      { *m = Validation{} }
 func (*Validation) ProtoMessage() {}
 func (*Validation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{22}
+	return fileDescriptor_7f7c65a4f012fb19, []int{31}
 }
 func (m *Validation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -693,7 +946,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
 func (m *Variable) Reset()      { *m = Variable{} }
 func (*Variable) ProtoMessage() {}
 func (*Variable) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{23}
+	return fileDescriptor_7f7c65a4f012fb19, []int{32}
 }
 func (m *Variable) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -721,7 +974,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo
 func (m *WebhookClientConfig) Reset()      { *m = WebhookClientConfig{} }
 func (*WebhookClientConfig) ProtoMessage() {}
 func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_7f7c65a4f012fb19, []int{24}
+	return fileDescriptor_7f7c65a4f012fb19, []int{33}
 }
 func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -747,13 +1000,22 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() {
 var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration")
 	proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
 	proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
+	proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch")
 	proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
 	proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
+	proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy")
+	proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding")
+	proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList")
+	proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec")
+	proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList")
+	proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec")
 	proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
 	proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
 	proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
+	proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation")
 	proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
 	proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
 	proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
@@ -779,130 +1041,174 @@ func init() {
 }
 
 var fileDescriptor_7f7c65a4f012fb19 = []byte{
-	// 1957 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7,
-	0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85,
-	0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30,
-	0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b,
-	0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed,
-	0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2,
-	0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f,
-	0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde,
-	0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f,
-	0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b,
-	0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36,
-	0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0,
-	0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd,
-	0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a,
-	0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c,
-	0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8,
-	0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb,
-	0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1,
-	0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4,
-	0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38,
-	0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48,
-	0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20,
-	0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6,
-	0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e,
-	0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22,
-	0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9,
-	0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5,
-	0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b,
-	0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88,
-	0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d,
-	0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e,
-	0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7,
-	0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7,
-	0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e,
-	0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa,
-	0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20,
-	0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36,
-	0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0,
-	0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b,
-	0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a,
-	0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58,
-	0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18,
-	0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19,
-	0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e,
-	0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad,
-	0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e,
-	0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c,
-	0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d,
-	0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e,
-	0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43,
-	0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0,
-	0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee,
-	0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9,
-	0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c,
-	0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc,
-	0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd,
-	0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20,
-	0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5,
-	0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8,
-	0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6,
-	0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2,
-	0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90,
-	0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81,
-	0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec,
-	0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba,
-	0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf,
-	0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3,
-	0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d,
-	0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d,
-	0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39,
-	0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51,
-	0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef,
-	0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97,
-	0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02,
-	0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01,
-	0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18,
-	0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22,
-	0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd,
-	0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40,
-	0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73,
-	0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3,
-	0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3,
-	0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3,
-	0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02,
-	0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d,
-	0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72,
-	0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c,
-	0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c,
-	0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c,
-	0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88,
-	0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3,
-	0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85,
-	0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73,
-	0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd,
-	0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3,
-	0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11,
-	0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf,
-	0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b,
-	0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38,
-	0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7,
-	0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b,
-	0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa,
-	0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa,
-	0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85,
-	0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d,
-	0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8,
-	0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb,
-	0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7,
-	0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11,
-	0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f,
-	0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a,
-	0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96,
-	0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b,
-	0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5,
-	0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25,
-	0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98,
-	0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
-	0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf,
-	0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae,
-	0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8,
-	0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57,
-	0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8,
-	0x4a, 0x10, 0x21, 0x00, 0x00,
+	// 2215 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7,
+	0x15, 0xf6, 0x92, 0x92, 0x45, 0x3e, 0xca, 0x92, 0x38, 0x71, 0x2a, 0xfa, 0x8f, 0x14, 0x16, 0x41,
+	0x21, 0x03, 0x2d, 0x59, 0x2b, 0x41, 0xe2, 0x3a, 0x29, 0x02, 0xae, 0x62, 0x3b, 0x76, 0x24, 0x59,
+	0x18, 0x39, 0x52, 0xd1, 0x26, 0x40, 0x56, 0xcb, 0x21, 0xb9, 0x11, 0xb9, 0xcb, 0xee, 0x2c, 0x65,
+	0xab, 0x05, 0xda, 0x02, 0x2d, 0x90, 0x1e, 0x0b, 0xf4, 0x52, 0xa0, 0xa7, 0xde, 0x7b, 0x69, 0xef,
+	0x05, 0x7a, 0xf4, 0x31, 0xb7, 0x1a, 0x28, 0x4a, 0x54, 0x4c, 0xd1, 0x9e, 0x7a, 0x48, 0x81, 0xf6,
+	0xa0, 0x4b, 0x8b, 0x99, 0x9d, 0xfd, 0xdf, 0x95, 0x56, 0xb2, 0x2c, 0x17, 0x85, 0x6f, 0xda, 0xf7,
+	0xe6, 0xbd, 0x37, 0xef, 0xcd, 0x9b, 0xf7, 0xbe, 0x79, 0x22, 0xdc, 0xdc, 0xb9, 0x49, 0xeb, 0xba,
+	0xd9, 0x50, 0x07, 0x7a, 0x43, 0x6d, 0xf5, 0x75, 0x4a, 0x75, 0xd3, 0xb0, 0x48, 0x47, 0xa7, 0xb6,
+	0xa5, 0xda, 0xba, 0x69, 0x34, 0x76, 0x6f, 0x6c, 0x13, 0x5b, 0xbd, 0xd1, 0xe8, 0x10, 0x83, 0x58,
+	0xaa, 0x4d, 0x5a, 0xf5, 0x81, 0x65, 0xda, 0x26, 0x5a, 0x74, 0x24, 0xeb, 0xea, 0x40, 0xaf, 0x27,
+	0x4a, 0xd6, 0x85, 0xe4, 0xe5, 0xaf, 0x77, 0x74, 0xbb, 0x3b, 0xdc, 0xae, 0x6b, 0x66, 0xbf, 0xd1,
+	0x31, 0x3b, 0x66, 0x83, 0x2b, 0xd8, 0x1e, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x39, 0x8a, 0x2f,
+	0xbf, 0x9e, 0x61, 0x4b, 0xd1, 0xdd, 0x5c, 0x7e, 0xc3, 0x17, 0xea, 0xab, 0x5a, 0x57, 0x37, 0x88,
+	0xb5, 0xd7, 0x18, 0xec, 0x74, 0x18, 0x81, 0x36, 0xfa, 0xc4, 0x56, 0x93, 0xa4, 0x1a, 0x69, 0x52,
+	0xd6, 0xd0, 0xb0, 0xf5, 0x3e, 0x89, 0x09, 0xbc, 0x79, 0x94, 0x00, 0xd5, 0xba, 0xa4, 0xaf, 0x46,
+	0xe5, 0xe4, 0xf7, 0x01, 0x35, 0x07, 0x83, 0xde, 0xde, 0xb2, 0x69, 0xb4, 0xf5, 0xce, 0xd0, 0xf1,
+	0x03, 0x2d, 0x01, 0x90, 0xc7, 0x03, 0x8b, 0x70, 0x0f, 0x2b, 0xd2, 0x82, 0xb4, 0x58, 0x54, 0xd0,
+	0x93, 0x51, 0xed, 0xdc, 0x78, 0x54, 0x83, 0xdb, 0x1e, 0x07, 0x07, 0x56, 0xc9, 0x14, 0x66, 0x9b,
+	0xc3, 0x96, 0x6e, 0x37, 0x0d, 0xc3, 0xb4, 0x1d, 0x35, 0xd7, 0x20, 0xbf, 0x43, 0xf6, 0x84, 0x7c,
+	0x49, 0xc8, 0xe7, 0x3f, 0x20, 0x7b, 0x98, 0xd1, 0x51, 0x13, 0x66, 0x77, 0xd5, 0xde, 0x90, 0xf8,
+	0x0a, 0x2b, 0x39, 0xbe, 0x74, 0x5e, 0x2c, 0x9d, 0xdd, 0x0c, 0xb3, 0x71, 0x74, 0xbd, 0xdc, 0x83,
+	0xb2, 0xff, 0xb5, 0xa5, 0x5a, 0x86, 0x6e, 0x74, 0xd0, 0xd7, 0xa0, 0xd0, 0xd6, 0x49, 0xaf, 0x85,
+	0x49, 0x5b, 0x28, 0x9c, 0x13, 0x0a, 0x0b, 0x77, 0x04, 0x1d, 0x7b, 0x2b, 0xd0, 0x75, 0x98, 0x7a,
+	0xe4, 0x08, 0x56, 0xf2, 0x7c, 0xf1, 0xac, 0x58, 0x3c, 0x25, 0xf4, 0x61, 0x97, 0x2f, 0xbf, 0x0b,
+	0xc5, 0xfb, 0x1b, 0x0f, 0xd6, 0xd6, 0x55, 0x5b, 0xeb, 0x9e, 0x28, 0x46, 0x6d, 0x98, 0x59, 0x65,
+	0xc2, 0xcb, 0xa6, 0xd1, 0xd2, 0x79, 0x88, 0x16, 0x60, 0xc2, 0x50, 0xfb, 0x44, 0xc8, 0x4f, 0x0b,
+	0xf9, 0x89, 0x35, 0xb5, 0x4f, 0x30, 0xe7, 0x44, 0xec, 0xe4, 0x32, 0xd9, 0xf9, 0xe3, 0x84, 0x30,
+	0x84, 0x09, 0x35, 0x87, 0x96, 0x46, 0x28, 0x7a, 0x0c, 0x65, 0xa6, 0x8e, 0x0e, 0x54, 0x8d, 0x6c,
+	0x90, 0x1e, 0xd1, 0x6c, 0xd3, 0xe2, 0x56, 0x4b, 0x4b, 0xaf, 0xd7, 0xfd, 0x1b, 0xe3, 0x25, 0x4f,
+	0x7d, 0xb0, 0xd3, 0x61, 0x04, 0x5a, 0x67, 0x39, 0x5a, 0xdf, 0xbd, 0x51, 0x5f, 0x51, 0xb7, 0x49,
+	0xcf, 0x15, 0x55, 0x5e, 0x1d, 0x8f, 0x6a, 0xe5, 0xb5, 0xa8, 0x46, 0x1c, 0x37, 0x82, 0x4c, 0x98,
+	0x31, 0xb7, 0x3f, 0x25, 0x9a, 0xed, 0x99, 0xcd, 0x9d, 0xdc, 0x2c, 0x1a, 0x8f, 0x6a, 0x33, 0x0f,
+	0x42, 0xea, 0x70, 0x44, 0x3d, 0xfa, 0x21, 0x5c, 0xb0, 0x84, 0xdf, 0x78, 0xd8, 0x23, 0xb4, 0x92,
+	0x5f, 0xc8, 0x2f, 0x96, 0x96, 0x9a, 0xf5, 0xac, 0x85, 0xa1, 0xce, 0xfc, 0x6a, 0x31, 0xd9, 0x2d,
+	0xdd, 0xee, 0x3e, 0x18, 0x10, 0x87, 0x4d, 0x95, 0x57, 0x45, 0xdc, 0x2f, 0xe0, 0xa0, 0x7e, 0x1c,
+	0x36, 0x87, 0x7e, 0x21, 0xc1, 0x45, 0xf2, 0x58, 0xeb, 0x0d, 0x5b, 0x24, 0xb4, 0xae, 0x32, 0x71,
+	0x5a, 0xfb, 0xb8, 0x2a, 0xf6, 0x71, 0xf1, 0x76, 0x82, 0x19, 0x9c, 0x68, 0x1c, 0xbd, 0x07, 0xa5,
+	0x3e, 0x4b, 0x89, 0x75, 0xb3, 0xa7, 0x6b, 0x7b, 0x95, 0x29, 0x9e, 0x48, 0xf2, 0x78, 0x54, 0x2b,
+	0xad, 0xfa, 0xe4, 0x83, 0x51, 0x6d, 0x36, 0xf0, 0xf9, 0x70, 0x6f, 0x40, 0x70, 0x50, 0x4c, 0xfe,
+	0xab, 0x04, 0xf3, 0xab, 0x43, 0x76, 0xbf, 0x8d, 0x4e, 0xd3, 0xdd, 0xbb, 0xc3, 0x43, 0x9f, 0x40,
+	0x81, 0x1d, 0x5a, 0x4b, 0xb5, 0x55, 0x91, 0x59, 0xdf, 0xc8, 0x76, 0xc4, 0xce, 0x79, 0xae, 0x12,
+	0x5b, 0xf5, 0x33, 0xdb, 0xa7, 0x61, 0x4f, 0x2b, 0xea, 0xc0, 0x04, 0x1d, 0x10, 0x4d, 0x24, 0xd0,
+	0xed, 0xec, 0x81, 0x4c, 0xd9, 0xf2, 0xc6, 0x80, 0x68, 0xfe, 0xa5, 0x63, 0x5f, 0x98, 0x1b, 0x90,
+	0xff, 0x29, 0x41, 0x35, 0x45, 0x46, 0xd1, 0x8d, 0x16, 0xab, 0x32, 0xcf, 0xdf, 0x5b, 0x23, 0xe4,
+	0xed, 0xca, 0x33, 0x7b, 0x2b, 0x76, 0x9e, 0xea, 0xf4, 0x97, 0x12, 0xc8, 0x87, 0x8b, 0xae, 0xe8,
+	0xd4, 0x46, 0x1f, 0xc5, 0x1c, 0xaf, 0x67, 0xbc, 0xc9, 0x3a, 0x75, 0xdc, 0xf6, 0xca, 0xb1, 0x4b,
+	0x09, 0x38, 0xdd, 0x87, 0x49, 0xdd, 0x26, 0x7d, 0x5a, 0xc9, 0xf1, 0xcb, 0xf2, 0xfe, 0x69, 0x79,
+	0xad, 0x5c, 0x10, 0x46, 0x27, 0xef, 0x31, 0xf5, 0xd8, 0xb1, 0x22, 0xff, 0x26, 0x77, 0x94, 0xcf,
+	0x2c, 0x40, 0xac, 0x08, 0x0f, 0x38, 0x71, 0xcd, 0x2f, 0xd6, 0xde, 0xe1, 0xad, 0x7b, 0x1c, 0x1c,
+	0x58, 0xc5, 0xe2, 0x34, 0x50, 0x2d, 0xb5, 0xef, 0xb6, 0xa1, 0xd2, 0xd2, 0x52, 0x76, 0x67, 0xd6,
+	0x85, 0xa4, 0x32, 0xcd, 0xe2, 0xe4, 0x7e, 0x61, 0x4f, 0x23, 0xb2, 0x61, 0xa6, 0x1f, 0xaa, 0xf0,
+	0xbc, 0x7b, 0x95, 0x96, 0x6e, 0x1e, 0x23, 0x60, 0x21, 0x79, 0xa7, 0xb4, 0x86, 0x69, 0x38, 0x62,
+	0x43, 0xfe, 0x42, 0x82, 0x2b, 0x29, 0xe1, 0x3a, 0x83, 0xdc, 0x68, 0x87, 0x73, 0xa3, 0xf9, 0xec,
+	0xb9, 0x91, 0x9c, 0x14, 0xbf, 0x3a, 0x9f, 0xea, 0x25, 0xcf, 0x86, 0x4f, 0xa0, 0xc8, 0xcf, 0xe1,
+	0x03, 0xdd, 0x68, 0x25, 0xf4, 0xd0, 0x2c, 0x47, 0xcb, 0x44, 0x95, 0x0b, 0xe3, 0x51, 0xad, 0xe8,
+	0x7d, 0x62, 0x5f, 0x29, 0xfa, 0x3e, 0xcc, 0xf5, 0x05, 0x50, 0x60, 0xf2, 0xba, 0x61, 0x53, 0x91,
+	0x43, 0x27, 0x3f, 0xdf, 0x8b, 0xe3, 0x51, 0x6d, 0x6e, 0x35, 0xa2, 0x15, 0xc7, 0xec, 0x20, 0x0d,
+	0x8a, 0xbb, 0xaa, 0xa5, 0xab, 0xdb, 0x7e, 0xeb, 0x3c, 0x46, 0xe2, 0x6e, 0x0a, 0x51, 0xa5, 0x2c,
+	0x42, 0x5b, 0x74, 0x29, 0x14, 0xfb, 0x7a, 0x99, 0x91, 0xfe, 0xd0, 0x81, 0x89, 0x6e, 0x5f, 0x5c,
+	0x3a, 0xee, 0x71, 0x9a, 0x86, 0x6f, 0xc4, 0xa5, 0x50, 0xec, 0xeb, 0x45, 0x2b, 0x70, 0xa1, 0xad,
+	0xea, 0xbd, 0xa1, 0x45, 0x44, 0xd3, 0x9b, 0xe4, 0x17, 0xf7, 0xab, 0xac, 0x83, 0xdf, 0x09, 0x32,
+	0x0e, 0x46, 0xb5, 0x72, 0x88, 0xc0, 0x1b, 0x5f, 0x58, 0x18, 0xfd, 0x00, 0x66, 0xfb, 0x21, 0xf0,
+	0x46, 0x2b, 0xe7, 0xf9, 0xc6, 0x8f, 0x7b, 0x24, 0x9e, 0x02, 0x1f, 0xe8, 0x86, 0xe9, 0x14, 0x47,
+	0x2d, 0xa1, 0x9f, 0x49, 0x80, 0x2c, 0xa2, 0x1b, 0xbb, 0xa6, 0xc6, 0x35, 0x86, 0xba, 0xf8, 0xb7,
+	0x85, 0x1a, 0x84, 0x63, 0x2b, 0x0e, 0x46, 0xb5, 0x5b, 0x19, 0x9e, 0x2d, 0xf5, 0xb8, 0x24, 0x0f,
+	0x41, 0x82, 0x4d, 0xf9, 0x6f, 0x05, 0x98, 0x75, 0x6f, 0xc7, 0x16, 0xd9, 0xee, 0x9a, 0xe6, 0x4e,
+	0x06, 0x18, 0xfb, 0x08, 0xa6, 0xb5, 0x9e, 0x4e, 0x0c, 0xdb, 0x79, 0x69, 0x88, 0x6c, 0xfe, 0x56,
+	0xf6, 0xd0, 0x09, 0x53, 0xcb, 0x01, 0x25, 0xca, 0x45, 0x61, 0x68, 0x3a, 0x48, 0xc5, 0x21, 0x43,
+	0xe8, 0x23, 0x98, 0xb4, 0x02, 0x28, 0xf0, 0xad, 0x2c, 0x16, 0xeb, 0x09, 0x98, 0xcb, 0x2b, 0x15,
+	0x0e, 0xc8, 0x72, 0x94, 0xc6, 0x53, 0x6c, 0xe2, 0x59, 0x52, 0x2c, 0x82, 0xd1, 0x8a, 0x27, 0xc2,
+	0x68, 0xc9, 0x50, 0x7f, 0xf2, 0xc5, 0x40, 0xfd, 0xd2, 0xf3, 0x85, 0xfa, 0xef, 0x41, 0x89, 0xea,
+	0x2d, 0x72, 0xbb, 0xdd, 0x26, 0x9a, 0xcd, 0xee, 0xa3, 0x17, 0xb0, 0x0d, 0x9f, 0xcc, 0x02, 0xe6,
+	0x7f, 0x2e, 0xf7, 0x54, 0x4a, 0x71, 0x50, 0x0c, 0xdd, 0x82, 0x19, 0xf6, 0x46, 0x36, 0x87, 0xf6,
+	0x06, 0xd1, 0x4c, 0xa3, 0x45, 0xf9, 0xbd, 0x9a, 0x74, 0x76, 0xf0, 0x30, 0xc4, 0xc1, 0x91, 0x95,
+	0xe8, 0x43, 0x98, 0xf7, 0xb2, 0x08, 0x93, 0x5d, 0x9d, 0x3c, 0xda, 0x24, 0x16, 0xe5, 0xd5, 0xa1,
+	0xb0, 0x90, 0x5f, 0x2c, 0x2a, 0x57, 0xc6, 0xa3, 0xda, 0x7c, 0x33, 0x79, 0x09, 0x4e, 0x93, 0x45,
+	0x3f, 0x4d, 0xbe, 0xef, 0xc0, 0x1d, 0x7c, 0x78, 0x56, 0x77, 0x3d, 0xa9, 0xe6, 0x4d, 0x9f, 0x55,
+	0xcd, 0x93, 0xff, 0x2c, 0xc1, 0xd5, 0x48, 0xa1, 0x09, 0x8f, 0x29, 0x9e, 0x3f, 0x04, 0xff, 0x2e,
+	0x14, 0x84, 0x65, 0x17, 0x74, 0x7c, 0xf3, 0xf8, 0xa0, 0x43, 0x68, 0x50, 0x26, 0x98, 0x29, 0xec,
+	0x29, 0x94, 0xff, 0x21, 0xc1, 0xc2, 0x61, 0xfe, 0x9d, 0x01, 0xa2, 0xda, 0x09, 0x23, 0xaa, 0x3b,
+	0x27, 0x76, 0x2e, 0xb4, 0xf1, 0x14, 0x58, 0xf5, 0xdb, 0x1c, 0x14, 0xdc, 0x3e, 0x8d, 0xde, 0x61,
+	0x18, 0xca, 0xd6, 0xba, 0x2c, 0xf5, 0xc4, 0x54, 0xa3, 0xea, 0x36, 0xf3, 0x75, 0x97, 0x71, 0x10,
+	0xfc, 0xc0, 0xbe, 0x00, 0xbf, 0x1e, 0x6a, 0x6c, 0x6e, 0x25, 0x20, 0xf0, 0x3b, 0xd9, 0xbd, 0x88,
+	0xcf, 0xbe, 0x94, 0xaf, 0xb0, 0xcb, 0x15, 0xa7, 0xe3, 0x04, 0x7b, 0x0c, 0x08, 0x7e, 0x4a, 0x4d,
+	0x83, 0x6f, 0x91, 0x57, 0xfe, 0x63, 0x01, 0x41, 0x6f, 0x96, 0xe4, 0x00, 0x41, 0xef, 0x13, 0xfb,
+	0x4a, 0xe5, 0xa7, 0x12, 0xcc, 0xa7, 0x4c, 0x01, 0xd0, 0x5b, 0xfe, 0x9c, 0x83, 0x57, 0xe7, 0x8a,
+	0xc4, 0x0b, 0x4e, 0x39, 0x38, 0xa0, 0xe0, 0x0c, 0x1c, 0x5e, 0x87, 0x7e, 0xc2, 0x8a, 0x4b, 0x4c,
+	0x9f, 0x68, 0xc9, 0x27, 0x6e, 0x90, 0x97, 0x3d, 0x14, 0x12, 0xe3, 0xe1, 0x04, 0x73, 0xb2, 0x0a,
+	0x3e, 0xf6, 0x65, 0x0f, 0x2c, 0x75, 0xa0, 0x8b, 0xf2, 0x17, 0x7d, 0x60, 0x35, 0xd7, 0xef, 0x09,
+	0x0e, 0x0e, 0xac, 0x62, 0xa0, 0x63, 0x87, 0x21, 0xf0, 0x5c, 0x18, 0x74, 0x70, 0x2c, 0xcd, 0x39,
+	0xf2, 0xef, 0x72, 0xe0, 0xbd, 0x9d, 0x32, 0x60, 0x94, 0x06, 0x14, 0xbd, 0x9e, 0x26, 0xb4, 0x7a,
+	0x00, 0xd3, 0xeb, 0x7f, 0xd8, 0x5f, 0x83, 0x3e, 0x86, 0x02, 0x75, 0x3b, 0x5d, 0xfe, 0xe4, 0x9d,
+	0x8e, 0xbf, 0xf1, 0xbc, 0x1e, 0xe7, 0xa9, 0x44, 0x36, 0xcc, 0xf3, 0x27, 0x01, 0xb1, 0x89, 0xb5,
+	0x66, 0xda, 0x77, 0xcc, 0xa1, 0xd1, 0x6a, 0x6a, 0x3c, 0xd3, 0x1d, 0x98, 0x71, 0x8b, 0xf5, 0x96,
+	0xf5, 0xe4, 0x25, 0x07, 0xa3, 0xda, 0x95, 0x14, 0x16, 0xbf, 0x4d, 0x69, 0xaa, 0xe5, 0x5f, 0x4b,
+	0x30, 0xb7, 0x41, 0xac, 0x5d, 0x5d, 0x23, 0x98, 0xb4, 0x89, 0x45, 0x0c, 0x2d, 0x12, 0x1a, 0x29,
+	0x43, 0x68, 0xdc, 0x68, 0xe7, 0x52, 0xa3, 0x7d, 0x15, 0x26, 0x06, 0xaa, 0xdd, 0x15, 0x53, 0xd7,
+	0x02, 0xe3, 0xae, 0xab, 0x76, 0x17, 0x73, 0x2a, 0xe7, 0x9a, 0x96, 0xcd, 0x1d, 0x9d, 0x14, 0x5c,
+	0xd3, 0xb2, 0x31, 0xa7, 0xca, 0xbf, 0x94, 0x60, 0x9a, 0x79, 0xb1, 0xdc, 0x25, 0xda, 0x8e, 0x6e,
+	0x74, 0xd0, 0x67, 0x12, 0x20, 0x12, 0x9d, 0x04, 0x3b, 0x37, 0xa2, 0xb4, 0xf4, 0x76, 0xf6, 0x3b,
+	0x19, 0x9b, 0x26, 0xfb, 0x69, 0x1d, 0x63, 0x51, 0x9c, 0x60, 0x52, 0xfe, 0x53, 0x0e, 0x2e, 0x6d,
+	0xaa, 0x3d, 0xbd, 0xf5, 0x82, 0x66, 0x64, 0x7a, 0x68, 0x6a, 0x74, 0xf7, 0x38, 0x2f, 0xb7, 0x94,
+	0x4d, 0xa7, 0x0d, 0x8c, 0xd0, 0xf7, 0xe0, 0x3c, 0xb5, 0x55, 0x7b, 0xe8, 0xce, 0x1e, 0xee, 0x9d,
+	0x86, 0x31, 0xae, 0x50, 0x99, 0x11, 0xe6, 0xce, 0x3b, 0xdf, 0x58, 0x18, 0x92, 0xff, 0x2d, 0xc1,
+	0x42, 0xaa, 0xec, 0xd9, 0x8d, 0xe6, 0x06, 0xa1, 0x20, 0xaf, 0x9d, 0x82, 0xdf, 0x47, 0x0d, 0xe7,
+	0xfe, 0x25, 0xc1, 0x6b, 0x47, 0x09, 0x9f, 0x01, 0x60, 0x30, 0xc3, 0x80, 0xe1, 0xfe, 0xe9, 0x79,
+	0x9e, 0x02, 0x1a, 0x3e, 0xcb, 0x1f, 0xed, 0xf7, 0xcb, 0x11, 0x5d, 0xe0, 0x1f, 0x3d, 0x5b, 0x50,
+	0xde, 0x15, 0xf1, 0x32, 0x0d, 0xa7, 0xa4, 0x3b, 0x13, 0x96, 0xa2, 0x72, 0x9d, 0x3d, 0xe4, 0x36,
+	0xa3, 0xcc, 0x83, 0x51, 0x6d, 0x2e, 0x4a, 0xc4, 0x71, 0x1d, 0xf2, 0xdf, 0x25, 0xb8, 0x96, 0x7a,
+	0x12, 0x67, 0x90, 0x7a, 0xdd, 0x70, 0xea, 0x2d, 0x9f, 0x46, 0xea, 0xa5, 0xce, 0xff, 0xae, 0x1d,
+	0x5a, 0x0d, 0xff, 0xcf, 0x27, 0x80, 0x3b, 0x50, 0xf2, 0x8f, 0xdf, 0x1d, 0x9c, 0xbc, 0x71, 0xfc,
+	0x78, 0x9b, 0x86, 0xf2, 0x8a, 0x08, 0x70, 0xc9, 0xa7, 0x51, 0x1c, 0xd4, 0x7e, 0xca, 0x13, 0x94,
+	0x1f, 0xc1, 0x9c, 0x1a, 0xfe, 0x2f, 0x34, 0xad, 0x4c, 0x1e, 0xf7, 0xe1, 0x16, 0xf9, 0x3f, 0xb6,
+	0x52, 0x11, 0x4e, 0xcc, 0x45, 0x18, 0x14, 0xc7, 0x8c, 0xbd, 0xd8, 0x29, 0x61, 0x68, 0x74, 0x3b,
+	0xf5, 0x7c, 0x46, 0xb7, 0xf2, 0x1f, 0x72, 0x50, 0x3b, 0xa2, 0x7d, 0xa3, 0xfb, 0x80, 0xcc, 0x6d,
+	0x4a, 0xac, 0x5d, 0xd2, 0xba, 0xeb, 0xfc, 0xe2, 0xc0, 0x85, 0xf5, 0x79, 0x1f, 0x50, 0x3d, 0x88,
+	0xad, 0xc0, 0x09, 0x52, 0xa8, 0x07, 0xd3, 0x76, 0x00, 0xea, 0x89, 0x5b, 0xf0, 0x66, 0x76, 0xbf,
+	0x82, 0x40, 0x51, 0x99, 0x1b, 0x8f, 0x6a, 0x21, 0xe8, 0x88, 0x43, 0xda, 0x91, 0x06, 0xa0, 0xf9,
+	0x47, 0xe7, 0xa4, 0x7e, 0x23, 0x5b, 0x15, 0xf3, 0x4f, 0xcc, 0xeb, 0x3b, 0x81, 0xc3, 0x0a, 0xa8,
+	0x95, 0xf7, 0xa7, 0xa0, 0xec, 0x87, 0xf0, 0xe5, 0x10, 0xf5, 0xe5, 0x10, 0xf5, 0xd0, 0x21, 0x2a,
+	0xbc, 0x1c, 0xa2, 0x9e, 0x68, 0x88, 0x9a, 0x50, 0x8b, 0x4b, 0x67, 0x36, 0xbd, 0xdc, 0x97, 0xa0,
+	0x1a, 0xbb, 0xe3, 0x67, 0x3d, 0xbf, 0xfc, 0x38, 0x36, 0xbf, 0x7c, 0xfb, 0x24, 0xb0, 0x29, 0x6d,
+	0x82, 0xf9, 0xa5, 0x04, 0xf2, 0xe1, 0x3e, 0xfe, 0x4f, 0xff, 0x62, 0xe0, 0xf0, 0xad, 0xa7, 0x80,
+	0xc3, 0xff, 0x48, 0x00, 0x3e, 0x98, 0x41, 0xaf, 0x41, 0xe0, 0x47, 0x58, 0xa2, 0x74, 0x3b, 0x61,
+	0x0a, 0xd0, 0xd1, 0x75, 0x98, 0xea, 0x13, 0x4a, 0xd5, 0x8e, 0x3b, 0x10, 0xf1, 0x7e, 0x64, 0xb6,
+	0xea, 0x90, 0xb1, 0xcb, 0x47, 0x5b, 0x70, 0xde, 0x22, 0x2a, 0x15, 0xd3, 0xcc, 0xa2, 0xf2, 0x2e,
+	0x7b, 0x05, 0x63, 0x4e, 0x39, 0x18, 0xd5, 0x6e, 0x64, 0xf9, 0x39, 0x61, 0x5d, 0x3c, 0x9a, 0xb9,
+	0x10, 0x16, 0xea, 0xd0, 0x5d, 0x28, 0x0b, 0x1b, 0x81, 0x0d, 0x3b, 0x95, 0xf6, 0x92, 0xd8, 0x4d,
+	0x79, 0x35, 0xba, 0x00, 0xc7, 0x65, 0xe4, 0xfb, 0x50, 0x70, 0x81, 0x01, 0xaa, 0xc0, 0x44, 0xe0,
+	0xbd, 0xe5, 0x38, 0xce, 0x29, 0x91, 0xc0, 0xe4, 0x92, 0x03, 0x23, 0xff, 0x5e, 0x82, 0x57, 0x12,
+	0x9a, 0x12, 0xba, 0x04, 0xf9, 0xa1, 0xd5, 0x13, 0x21, 0x98, 0x1a, 0x8f, 0x6a, 0xf9, 0x0f, 0xf1,
+	0x0a, 0x66, 0x34, 0xa4, 0xc2, 0x14, 0x75, 0xc6, 0x53, 0x22, 0x99, 0x6e, 0x65, 0x3f, 0xf1, 0xe8,
+	0x5c, 0x4b, 0x29, 0xb1, 0x33, 0x70, 0xa9, 0xae, 0x5e, 0xb4, 0x08, 0x05, 0x4d, 0x55, 0x86, 0x46,
+	0xab, 0xe7, 0x9c, 0xd7, 0xb4, 0xf3, 0xc6, 0x5b, 0x6e, 0x3a, 0x34, 0xec, 0x71, 0x95, 0xb5, 0x27,
+	0xfb, 0xd5, 0x73, 0x9f, 0xef, 0x57, 0xcf, 0x3d, 0xdd, 0xaf, 0x9e, 0xfb, 0xf1, 0xb8, 0x2a, 0x3d,
+	0x19, 0x57, 0xa5, 0xcf, 0xc7, 0x55, 0xe9, 0xe9, 0xb8, 0x2a, 0xfd, 0x65, 0x5c, 0x95, 0x7e, 0xfe,
+	0x45, 0xf5, 0xdc, 0x77, 0x16, 0xb3, 0xfe, 0x98, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
+	0x7c, 0x49, 0xa4, 0xf7, 0x2a, 0x00, 0x00,
+}
+
+func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
 }
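
The generated functions above and below all follow one gogo/protobuf pattern: Size() computes the encoded length, Marshal() allocates a buffer of exactly that size, and MarshalToSizedBuffer() fills it back to front, writing each field's payload, then its varint-encoded length, then a one-byte tag (0xa is field 1 with wire type 2, length-delimited). A minimal standalone sketch of that pattern for a hypothetical one-string-field message; demoMessage, sovDemo and encodeVarintDemo are made-up names that only mirror the generated helpers, not part of the vendored file:

// demo_marshal.go - sketch of the reverse-marshaling pattern used by the
// generated code above; names here are hypothetical stand-ins.
package main

import "fmt"

type demoMessage struct {
	Expression string // analogous to ApplyConfiguration.Expression (field 1)
}

// sovDemo returns the number of bytes needed to varint-encode v.
func sovDemo(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarintDemo writes v as a varint ending just before offset and
// returns the new (smaller) offset, mirroring encodeVarintGenerated.
func encodeVarintDemo(dAtA []byte, offset int, v uint64) int {
	offset -= sovDemo(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// size mirrors the generated Size(): tag byte + length varint + payload.
func (m *demoMessage) size() int {
	l := len(m.Expression)
	return 1 + l + sovDemo(uint64(l))
}

// marshalToSizedBuffer fills dAtA from the end toward the front and
// returns how many bytes were written, like the generated marshalers.
func (m *demoMessage) marshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	i -= len(m.Expression)
	copy(dAtA[i:], m.Expression)
	i = encodeVarintDemo(dAtA, i, uint64(len(m.Expression)))
	i--
	dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
	return len(dAtA) - i, nil
}

func main() {
	m := &demoMessage{Expression: "object.spec"}
	buf := make([]byte, m.size())
	n, _ := m.marshalToSizedBuffer(buf)
	fmt.Printf("%% x (%d bytes)\n", n) // see note below
	fmt.Printf("% x\n", buf[len(buf)-n:])
}

Running it prints 0a 0b 6f 62 6a 65 63 74 2e 73 70 65 63 (tag, length, payload), the same byte layout the ApplyConfiguration and JSONPatch marshalers above produce for their Expression field.
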
 
 func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
@@ -971,6 +1277,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *JSONPatch) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -1086,7 +1420,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
-func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1096,112 +1430,18 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.MatchConditions) > 0 {
-		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x62
-		}
-	}
-	if m.ObjectSelector != nil {
-		{
-			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x5a
-	}
-	if m.ReinvocationPolicy != nil {
-		i -= len(*m.ReinvocationPolicy)
-		copy(dAtA[i:], *m.ReinvocationPolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
-		i--
-		dAtA[i] = 0x52
-	}
-	if m.MatchPolicy != nil {
-		i -= len(*m.MatchPolicy)
-		copy(dAtA[i:], *m.MatchPolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
-		i--
-		dAtA[i] = 0x4a
-	}
-	if len(m.AdmissionReviewVersions) > 0 {
-		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.AdmissionReviewVersions[iNdEx])
-			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
-			i--
-			dAtA[i] = 0x42
-		}
-	}
-	if m.TimeoutSeconds != nil {
-		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.SideEffects != nil {
-		i -= len(*m.SideEffects)
-		copy(dAtA[i:], *m.SideEffects)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.NamespaceSelector != nil {
-		{
-			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.FailurePolicy != nil {
-		i -= len(*m.FailurePolicy)
-		copy(dAtA[i:], *m.FailurePolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
-		i--
-		dAtA[i] = 0x22
-	}
-	if len(m.Rules) > 0 {
-		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
 	{
-		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
 			return 0, err
 		}
@@ -1210,15 +1450,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	i--
 	dAtA[i] = 0x12
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
 	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1228,30 +1473,26 @@ func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.Webhooks) > 0 {
-		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
 		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
+	i--
+	dAtA[i] = 0x12
 	{
 		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -1265,7 +1506,7 @@ func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, e
 	return len(dAtA) - i, nil
 }
 
-func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1275,12 +1516,12 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
@@ -1312,7 +1553,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in
 	return len(dAtA) - i, nil
 }
 
-func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1322,39 +1563,49 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	{
-		size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
+	if m.MatchResources != nil {
+		{
+			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x1a
 	}
-	i--
-	dAtA[i] = 0x12
-	if len(m.ResourceNames) > 0 {
-		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ResourceNames[iNdEx])
-			copy(dAtA[i:], m.ResourceNames[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
-			i--
-			dAtA[i] = 0xa
+	if m.ParamRef != nil {
+		{
+			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
+		i--
+		dAtA[i] = 0x12
 	}
+	i -= len(m.PolicyName)
+	copy(dAtA[i:], m.PolicyName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+	i--
+	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *ParamKind) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1364,30 +1615,44 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	i -= len(m.Kind)
-	copy(dAtA[i:], m.Kind)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.APIVersion)
-	copy(dAtA[i:], m.APIVersion)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
 	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
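
Repeated fields such as Items just above (and the MatchConditions, Rules and Webhooks loops elsewhere in this file) are marshaled from the last index down: because the buffer is filled back to front, writing the elements in reverse leaves them in their original order on the wire. A short standalone sketch of that detail with a hypothetical repeated string field using tag 0x12 (field 2, length-delimited); not part of the vendored file:

// demo_repeated.go - sketch of reverse iteration over a repeated field.
package main

import "fmt"

func main() {
	items := []string{"first", "second", "third"} // hypothetical repeated field
	// Size: each short item costs 1 tag byte + 1 length byte + payload.
	size := 0
	for _, s := range items {
		size += 2 + len(s)
	}
	dAtA := make([]byte, size)
	i := len(dAtA)
	for iNdEx := len(items) - 1; iNdEx >= 0; iNdEx-- {
		i -= len(items[iNdEx])
		copy(dAtA[i:], items[iNdEx])
		i--
		dAtA[i] = byte(len(items[iNdEx])) // single-byte varint length
		i--
		dAtA[i] = 0x12 // field 2, wire type 2, as in the Items loops above
	}
	fmt.Printf("% x\n", dAtA) // "first" still appears before "second" and "third"
}
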
 
-func (m *ParamRef) Marshal() (dAtA []byte, err error) {
+func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1397,26 +1662,73 @@ func (m *ParamRef) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.ParameterNotFoundAction != nil {
-		i -= len(*m.ParameterNotFoundAction)
-		copy(dAtA[i:], *m.ParameterNotFoundAction)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
+	i -= len(m.ReinvocationPolicy)
+	copy(dAtA[i:], m.ReinvocationPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy)))
+	i--
+	dAtA[i] = 0x3a
+	if len(m.MatchConditions) > 0 {
+		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.FailurePolicy != nil {
+		i -= len(*m.FailurePolicy)
+		copy(dAtA[i:], *m.FailurePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
 		i--
-		dAtA[i] = 0x22
+		dAtA[i] = 0x2a
 	}
-	if m.Selector != nil {
+	if len(m.Mutations) > 0 {
+		for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Variables) > 0 {
+		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.MatchConstraints != nil {
 		{
-			size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -1424,67 +1736,24 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
 		i--
-		dAtA[i] = 0x1a
-	}
-	i -= len(m.Namespace)
-	copy(dAtA[i:], m.Namespace)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.Port != nil {
-		i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
-		i--
-		dAtA[i] = 0x20
+		dAtA[i] = 0x12
 	}
-	if m.Path != nil {
-		i -= len(*m.Path)
-		copy(dAtA[i:], *m.Path)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
+	if m.ParamKind != nil {
+		{
+			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
 		i--
-		dAtA[i] = 0x1a
+		dAtA[i] = 0xa
 	}
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.Namespace)
-	copy(dAtA[i:], m.Namespace)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
-	i--
-	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1494,20 +1763,20 @@ func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.ExpressionWarnings) > 0 {
-		for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
+	if len(m.MatchConditions) > 0 {
+		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
 			{
-				size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
 				if err != nil {
 					return 0, err
 				}
@@ -1515,54 +1784,91 @@ func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 				i = encodeVarintGenerated(dAtA, i, uint64(size))
 			}
 			i--
-			dAtA[i] = 0xa
+			dAtA[i] = 0x62
 		}
 	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
+	if m.ObjectSelector != nil {
+		{
+			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
 	}
-	return dAtA[:n], nil
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	{
-		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
+	if m.ReinvocationPolicy != nil {
+		i -= len(*m.ReinvocationPolicy)
+		copy(dAtA[i:], *m.ReinvocationPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy)))
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.MatchPolicy != nil {
+		i -= len(*m.MatchPolicy)
+		copy(dAtA[i:], *m.MatchPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.AdmissionReviewVersions) > 0 {
+		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AdmissionReviewVersions[iNdEx])
+			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
+			i--
+			dAtA[i] = 0x42
 		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
-	i--
-	dAtA[i] = 0x1a
-	{
-		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.SideEffects != nil {
+		i -= len(*m.SideEffects)
+		copy(dAtA[i:], *m.SideEffects)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.NamespaceSelector != nil {
+		{
+			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.FailurePolicy != nil {
+		i -= len(*m.FailurePolicy)
+		copy(dAtA[i:], *m.FailurePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
 		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
-	i--
-	dAtA[i] = 0x12
 	{
-		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
 			return 0, err
 		}
@@ -1570,11 +1876,16 @@ func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, erro
 		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
 	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1584,26 +1895,30 @@ func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	{
-		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
+	if len(m.Webhooks) > 0 {
+		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
 		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
-	i--
-	dAtA[i] = 0x12
 	{
 		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -1617,7 +1932,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
+func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1627,12 +1942,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
@@ -1664,7 +1979,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte)
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
+func (m *Mutation) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1674,28 +1989,19 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.ValidationActions) > 0 {
-		for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ValidationActions[iNdEx])
-			copy(dAtA[i:], m.ValidationActions[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
-			i--
-			dAtA[i] = 0x22
-		}
-	}
-	if m.MatchResources != nil {
+	if m.JSONPatch != nil {
 		{
-			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -1703,11 +2009,11 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
 			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
 		i--
-		dAtA[i] = 0x1a
+		dAtA[i] = 0x22
 	}
-	if m.ParamRef != nil {
+	if m.ApplyConfiguration != nil {
 		{
-			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -1715,17 +2021,17 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte)
 			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
 		i--
-		dAtA[i] = 0x12
+		dAtA[i] = 0x1a
 	}
-	i -= len(m.PolicyName)
-	copy(dAtA[i:], m.PolicyName)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+	i -= len(m.PatchType)
+	copy(dAtA[i:], m.PatchType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType)))
 	i--
-	dAtA[i] = 0xa
+	dAtA[i] = 0x12
 	return len(dAtA) - i, nil
 }
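
The Mutation marshaler above also shows how optional fields are handled: pointer fields such as JSONPatch and ApplyConfiguration are emitted only when non-nil, and each tag byte is (fieldNumber << 3) | wireType, which is where 0x12, 0x1a and 0x22 come from (PatchType is field 2, ApplyConfiguration field 3, JSONPatch field 4, all length-delimited). A small standalone sketch under those assumptions; demoMutation, nested and protoTag are hypothetical names, not part of the vendored file:

// demo_optional.go - sketch of tag derivation and nil-pointer field skipping.
package main

import "fmt"

// protoTag returns the single-byte key for small field numbers:
// (fieldNumber << 3) | wireType, where wire type 2 = length-delimited.
func protoTag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

type nested struct{ payload string }

type demoMutation struct {
	PatchType          string  // field 2, always written
	ApplyConfiguration *nested // field 3, written only when non-nil
	JSONPatch          *nested // field 4, written only when non-nil
}

func main() {
	// These match the dAtA[i] assignments in the generated Mutation code.
	fmt.Printf("field 2: %#x, field 3: %#x, field 4: %#x\n",
		protoTag(2, 2), protoTag(3, 2), protoTag(4, 2))

	m := demoMutation{PatchType: "ApplyConfiguration", ApplyConfiguration: &nested{"{}"}}
	fmt.Println("PatchType:", m.PatchType)
	if m.JSONPatch == nil {
		fmt.Println("JSONPatch is nil, so no field-4 bytes are emitted")
	}
}
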
 
-func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
+func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1735,32 +2041,18 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.Items) > 0 {
-		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
 	{
-		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
 			return 0, err
 		}
@@ -1768,11 +2060,20 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int,
 		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
 	i--
-	dAtA[i] = 0xa
+	dAtA[i] = 0x12
+	if len(m.ResourceNames) > 0 {
+		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ResourceNames[iNdEx])
+			copy(dAtA[i:], m.ResourceNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
+func (m *ParamKind) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1782,94 +2083,59 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.Variables) > 0 {
-		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x3a
-		}
-	}
-	if len(m.MatchConditions) > 0 {
-		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x32
-		}
-	}
-	if len(m.AuditAnnotations) > 0 {
-		for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x2a
-		}
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ParamRef) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	if m.FailurePolicy != nil {
-		i -= len(*m.FailurePolicy)
-		copy(dAtA[i:], *m.FailurePolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+	return dAtA[:n], nil
+}
+
+func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ParameterNotFoundAction != nil {
+		i -= len(*m.ParameterNotFoundAction)
+		copy(dAtA[i:], *m.ParameterNotFoundAction)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
 		i--
 		dAtA[i] = 0x22
 	}
-	if len(m.Validations) > 0 {
-		for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.MatchConstraints != nil {
-		{
-			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ParamKind != nil {
+	if m.Selector != nil {
 		{
-			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -1877,12 +2143,22 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
 			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
 		i--
-		dAtA[i] = 0xa
+		dAtA[i] = 0x1a
 	}
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1892,49 +2168,42 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.Conditions) > 0 {
-		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
+	if m.Port != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+		i--
+		dAtA[i] = 0x20
 	}
-	if m.TypeChecking != nil {
-		{
-			size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
+	if m.Path != nil {
+		i -= len(*m.Path)
+		copy(dAtA[i:], *m.Path)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
 		i--
-		dAtA[i] = 0x12
+		dAtA[i] = 0x1a
 	}
-	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
 	i--
-	dAtA[i] = 0x8
+	dAtA[i] = 0x12
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -1944,20 +2213,20 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.MatchConditions) > 0 {
-		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+	if len(m.ExpressionWarnings) > 0 {
+		for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
 			{
-				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
 				if err != nil {
 					return 0, err
 				}
@@ -1965,84 +2234,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 				i = encodeVarintGenerated(dAtA, i, uint64(size))
 			}
 			i--
-			dAtA[i] = 0x5a
-		}
-	}
-	if m.ObjectSelector != nil {
-		{
-			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
+			dAtA[i] = 0xa
 		}
-		i--
-		dAtA[i] = 0x52
 	}
-	if m.MatchPolicy != nil {
-		i -= len(*m.MatchPolicy)
-		copy(dAtA[i:], *m.MatchPolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
-		i--
-		dAtA[i] = 0x4a
+	return len(dAtA) - i, nil
+}
+
+func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	if len(m.AdmissionReviewVersions) > 0 {
-		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.AdmissionReviewVersions[iNdEx])
-			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
-			i--
-			dAtA[i] = 0x42
-		}
-	}
-	if m.TimeoutSeconds != nil {
-		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.SideEffects != nil {
-		i -= len(*m.SideEffects)
-		copy(dAtA[i:], *m.SideEffects)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.NamespaceSelector != nil {
-		{
-			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.FailurePolicy != nil {
-		i -= len(*m.FailurePolicy)
-		copy(dAtA[i:], *m.FailurePolicy)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
-		i--
-		dAtA[i] = 0x22
-	}
-	if len(m.Rules) > 0 {
-		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
+	return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
 		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
+	i--
+	dAtA[i] = 0x1a
 	{
-		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
 			return 0, err
 		}
@@ -2051,15 +2280,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	}
 	i--
 	dAtA[i] = 0x12
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
 	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2069,30 +2303,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if len(m.Webhooks) > 0 {
-		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
 		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
+	i--
+	dAtA[i] = 0x12
 	{
 		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -2106,7 +2336,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int,
 	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2116,12 +2346,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error)
 	return dAtA[:n], nil
 }
 
-func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
@@ -2153,7 +2383,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (
 	return len(dAtA) - i, nil
 }
 
-func (m *Validation) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2163,42 +2393,58 @@ func (m *Validation) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	i -= len(m.MessageExpression)
-	copy(dAtA[i:], m.MessageExpression)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
-	i--
-	dAtA[i] = 0x22
-	if m.Reason != nil {
-		i -= len(*m.Reason)
-		copy(dAtA[i:], *m.Reason)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+	if len(m.ValidationActions) > 0 {
+		for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ValidationActions[iNdEx])
+			copy(dAtA[i:], m.ValidationActions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.MatchResources != nil {
+		{
+			size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
 		i--
 		dAtA[i] = 0x1a
 	}
-	i -= len(m.Message)
-	copy(dAtA[i:], m.Message)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.Expression)
-	copy(dAtA[i:], m.Expression)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	if m.ParamRef != nil {
+		{
+			size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.PolicyName)
+	copy(dAtA[i:], m.PolicyName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
 	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *Variable) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2208,30 +2454,44 @@ func (m *Variable) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	i -= len(m.Expression)
-	copy(dAtA[i:], m.Expression)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
 	i--
 	dAtA[i] = 0xa
 	return len(dAtA) - i, nil
 }
 
-func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
 	n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2241,335 +2501,636 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
 	return dAtA[:n], nil
 }
 
-func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
 	size := m.Size()
 	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	i := len(dAtA)
 	_ = i
 	var l int
 	_ = l
-	if m.URL != nil {
-		i -= len(*m.URL)
-		copy(dAtA[i:], *m.URL)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.CABundle != nil {
-		i -= len(m.CABundle)
-		copy(dAtA[i:], m.CABundle)
-		i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Service != nil {
-		{
-			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
+	if len(m.Variables) > 0 {
+		for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
 			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x3a
 		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
-	offset -= sovGenerated(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
 	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *AuditAnnotation) Size() (n int) {
-	if m == nil {
-		return 0
+	if len(m.MatchConditions) > 0 {
+		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
 	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.ValueExpression)
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+	if len(m.AuditAnnotations) > 0 {
+		for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.FailurePolicy != nil {
+		i -= len(*m.FailurePolicy)
+		copy(dAtA[i:], *m.FailurePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Validations) > 0 {
+		for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.MatchConstraints != nil {
+		{
+			size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ParamKind != nil {
+		{
+			size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
 }
 
-func (m *ExpressionWarning) Size() (n int) {
-	if m == nil {
-		return 0
+func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	var l int
-	_ = l
-	l = len(m.FieldRef)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Warning)
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+	return dAtA[:n], nil
 }
 
-func (m *MatchCondition) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Expression)
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *MatchResources) Size() (n int) {
-	if m == nil {
-		return 0
-	}
+func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
 	var l int
 	_ = l
-	if m.NamespaceSelector != nil {
-		l = m.NamespaceSelector.Size()
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if m.ObjectSelector != nil {
-		l = m.ObjectSelector.Size()
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if len(m.ResourceRules) > 0 {
-		for _, e := range m.ResourceRules {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
 		}
 	}
-	if len(m.ExcludeResourceRules) > 0 {
-		for _, e := range m.ExcludeResourceRules {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if m.TypeChecking != nil {
+		{
+			size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
+		i--
+		dAtA[i] = 0x12
 	}
-	if m.MatchPolicy != nil {
-		l = len(*m.MatchPolicy)
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	return n
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
 }
 
-func (m *MutatingWebhook) Size() (n int) {
-	if m == nil {
-		return 0
+func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
+	return dAtA[:n], nil
+}
+
+func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
 	var l int
 	_ = l
-	l = len(m.Name)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.ClientConfig.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	if len(m.Rules) > 0 {
-		for _, e := range m.Rules {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if len(m.MatchConditions) > 0 {
+		for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
 		}
 	}
-	if m.FailurePolicy != nil {
-		l = len(*m.FailurePolicy)
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if m.NamespaceSelector != nil {
-		l = m.NamespaceSelector.Size()
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if m.SideEffects != nil {
-		l = len(*m.SideEffects)
-		n += 1 + l + sovGenerated(uint64(l))
+	if m.ObjectSelector != nil {
+		{
+			size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
 	}
-	if m.TimeoutSeconds != nil {
-		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+	if m.MatchPolicy != nil {
+		i -= len(*m.MatchPolicy)
+		copy(dAtA[i:], *m.MatchPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+		i--
+		dAtA[i] = 0x4a
 	}
 	if len(m.AdmissionReviewVersions) > 0 {
-		for _, s := range m.AdmissionReviewVersions {
-			l = len(s)
-			n += 1 + l + sovGenerated(uint64(l))
+		for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AdmissionReviewVersions[iNdEx])
+			copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
+			i--
+			dAtA[i] = 0x42
 		}
 	}
-	if m.MatchPolicy != nil {
-		l = len(*m.MatchPolicy)
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if m.ReinvocationPolicy != nil {
-		l = len(*m.ReinvocationPolicy)
-		n += 1 + l + sovGenerated(uint64(l))
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x38
 	}
-	if m.ObjectSelector != nil {
-		l = m.ObjectSelector.Size()
-		n += 1 + l + sovGenerated(uint64(l))
+	if m.SideEffects != nil {
+		i -= len(*m.SideEffects)
+		copy(dAtA[i:], *m.SideEffects)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
+		i--
+		dAtA[i] = 0x32
 	}
-	if len(m.MatchConditions) > 0 {
-		for _, e := range m.MatchConditions {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if m.NamespaceSelector != nil {
+		{
+			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
 		}
+		i--
+		dAtA[i] = 0x2a
 	}
-	return n
-}
-
-func (m *MutatingWebhookConfiguration) Size() (n int) {
-	if m == nil {
-		return 0
+	if m.FailurePolicy != nil {
+		i -= len(*m.FailurePolicy)
+		copy(dAtA[i:], *m.FailurePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+		i--
+		dAtA[i] = 0x22
 	}
-	var l int
-	_ = l
-	l = m.ObjectMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	if len(m.Webhooks) > 0 {
-		for _, e := range m.Webhooks {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
 		}
 	}
-	return n
-}
-
-func (m *MutatingWebhookConfigurationList) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ListMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	if len(m.Items) > 0 {
-		for _, e := range m.Items {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	{
+		size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
 		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
-	return n
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
 }
 
-func (m *NamedRuleWithOperations) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.ResourceNames) > 0 {
-		for _, s := range m.ResourceNames {
-			l = len(s)
-			n += 1 + l + sovGenerated(uint64(l))
-		}
+func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	l = m.RuleWithOperations.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+	return dAtA[:n], nil
 }
 
-func (m *ParamKind) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.APIVersion)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Kind)
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
 }
 
-func (m *ParamRef) Size() (n int) {
-	if m == nil {
-		return 0
-	}
+func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
 	var l int
 	_ = l
-	l = len(m.Name)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Namespace)
-	n += 1 + l + sovGenerated(uint64(l))
-	if m.Selector != nil {
-		l = m.Selector.Size()
-		n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Webhooks) > 0 {
+		for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
 	}
-	if m.ParameterNotFoundAction != nil {
-		l = len(*m.ParameterNotFoundAction)
-		n += 1 + l + sovGenerated(uint64(l))
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
 	}
-	return n
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
 }
 
-func (m *ServiceReference) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Namespace)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Name)
-	n += 1 + l + sovGenerated(uint64(l))
-	if m.Path != nil {
-		l = len(*m.Path)
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if m.Port != nil {
-		n += 1 + sovGenerated(uint64(*m.Port))
+func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	return n
+	return dAtA[:n], nil
 }
 
-func (m *TypeChecking) Size() (n int) {
-	if m == nil {
-		return 0
-	}
+func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
 	var l int
 	_ = l
-	if len(m.ExpressionWarnings) > 0 {
-		for _, e := range m.ExpressionWarnings {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
 		}
 	}
-	return n
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
 }
 
-func (m *ValidatingAdmissionPolicy) Size() (n int) {
-	if m == nil {
-		return 0
+func (m *Validation) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
 	}
-	var l int
-	_ = l
-	l = m.ObjectMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.Spec.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.Status.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
+	return dAtA[:n], nil
 }
 
-func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ObjectMeta.Size()
+func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.MessageExpression)
+	copy(dAtA[i:], m.MessageExpression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
+	i--
+	dAtA[i] = 0x22
+	if m.Reason != nil {
+		i -= len(*m.Reason)
+		copy(dAtA[i:], *m.Reason)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Variable) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.URL != nil {
+		i -= len(*m.URL)
+		copy(dAtA[i:], *m.URL)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.CABundle != nil {
+		i -= len(m.CABundle)
+		copy(dAtA[i:], m.CABundle)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Service != nil {
+		{
+			size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *ApplyConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *AuditAnnotation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ValueExpression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ExpressionWarning) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.FieldRef)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Warning)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *JSONPatch) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *MatchCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *MatchResources) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.NamespaceSelector != nil {
+		l = m.NamespaceSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ObjectSelector != nil {
+		l = m.ObjectSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ResourceRules) > 0 {
+		for _, e := range m.ResourceRules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ExcludeResourceRules) > 0 {
+		for _, e := range m.ExcludeResourceRules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.MatchPolicy != nil {
+		l = len(*m.MatchPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *MutatingAdmissionPolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
 	n += 1 + l + sovGenerated(uint64(l))
 	l = m.Spec.Size()
 	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
-func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+func (m *MutatingAdmissionPolicyBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *MutatingAdmissionPolicyBindingList) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2586,7 +3147,7 @@ func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
 	return n
 }
 
-func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2602,16 +3163,10 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
 		l = m.MatchResources.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if len(m.ValidationActions) > 0 {
-		for _, s := range m.ValidationActions {
-			l = len(s)
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
 	return n
 }
 
-func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+func (m *MutatingAdmissionPolicyList) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2628,7 +3183,7 @@ func (m *ValidatingAdmissionPolicyList) Size() (n int) {
 	return n
 }
 
-func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+func (m *MutatingAdmissionPolicySpec) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2642,8 +3197,14 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
 		l = m.MatchConstraints.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if len(m.Validations) > 0 {
-		for _, e := range m.Validations {
+	if len(m.Variables) > 0 {
+		for _, e := range m.Variables {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Mutations) > 0 {
+		for _, e := range m.Mutations {
 			l = e.Size()
 			n += 1 + l + sovGenerated(uint64(l))
 		}
@@ -2652,48 +3213,18 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
 		l = len(*m.FailurePolicy)
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if len(m.AuditAnnotations) > 0 {
-		for _, e := range m.AuditAnnotations {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
 	if len(m.MatchConditions) > 0 {
 		for _, e := range m.MatchConditions {
 			l = e.Size()
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
-	if len(m.Variables) > 0 {
-		for _, e := range m.Variables {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
-	if m.TypeChecking != nil {
-		l = m.TypeChecking.Size()
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	if len(m.Conditions) > 0 {
-		for _, e := range m.Conditions {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
+	l = len(m.ReinvocationPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
-func (m *ValidatingWebhook) Size() (n int) {
+func (m *MutatingWebhook) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2734,6 +3265,10 @@ func (m *ValidatingWebhook) Size() (n int) {
 		l = len(*m.MatchPolicy)
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.ReinvocationPolicy != nil {
+		l = len(*m.ReinvocationPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	if m.ObjectSelector != nil {
 		l = m.ObjectSelector.Size()
 		n += 1 + l + sovGenerated(uint64(l))
@@ -2747,7 +3282,7 @@ func (m *ValidatingWebhook) Size() (n int) {
 	return n
 }
 
-func (m *ValidatingWebhookConfiguration) Size() (n int) {
+func (m *MutatingWebhookConfiguration) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2764,7 +3299,7 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) {
 	return n
 }
 
-func (m *ValidatingWebhookConfigurationList) Size() (n int) {
+func (m *MutatingWebhookConfigurationList) Size() (n int) {
 	if m == nil {
 		return 0
 	}
@@ -2781,476 +3316,1911 @@ func (m *ValidatingWebhookConfigurationList) Size() (n int) {
 	return n
 }
 
-func (m *Validation) Size() (n int) {
+func (m *Mutation) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	l = len(m.Expression)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Message)
+	l = len(m.PatchType)
 	n += 1 + l + sovGenerated(uint64(l))
-	if m.Reason != nil {
-		l = len(*m.Reason)
+	if m.ApplyConfiguration != nil {
+		l = m.ApplyConfiguration.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	l = len(m.MessageExpression)
+	if m.JSONPatch != nil {
+		l = m.JSONPatch.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NamedRuleWithOperations) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ResourceNames) > 0 {
+		for _, s := range m.ResourceNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.RuleWithOperations.Size()
 	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
-func (m *Variable) Size() (n int) {
+func (m *ParamKind) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	l = len(m.Name)
+	l = len(m.APIVersion)
 	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Expression)
+	l = len(m.Kind)
 	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
-func (m *WebhookClientConfig) Size() (n int) {
+func (m *ParamRef) Size() (n int) {
 	if m == nil {
 		return 0
 	}
 	var l int
 	_ = l
-	if m.Service != nil {
-		l = m.Service.Size()
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Selector != nil {
+		l = m.Selector.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if m.CABundle != nil {
-		l = len(m.CABundle)
+	if m.ParameterNotFoundAction != nil {
+		l = len(*m.ParameterNotFoundAction)
 		n += 1 + l + sovGenerated(uint64(l))
 	}
-	if m.URL != nil {
-		l = len(*m.URL)
+	return n
+}
+
+func (m *ServiceReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Path != nil {
+		l = len(*m.Path)
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.Port != nil {
+		n += 1 + sovGenerated(uint64(*m.Port))
+	}
 	return n
 }
 
-func sovGenerated(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
-	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+func (m *TypeChecking) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ExpressionWarnings) > 0 {
+		for _, e := range m.ExpressionWarnings {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
 }
-func (this *AuditAnnotation) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicy) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&AuditAnnotation{`,
-		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
-		`ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
-		`}`,
-	}, "")
-	return s
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
 }
-func (this *ExpressionWarning) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&ExpressionWarning{`,
-		`FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
-		`Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
-		`}`,
-	}, "")
-	return s
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
 }
-func (this *MatchCondition) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&MatchCondition{`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
-		`}`,
-	}, "")
-	return s
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
 }
-func (this *MatchResources) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
-	for _, f := range this.ResourceRules {
-		repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+	var l int
+	_ = l
+	l = len(m.PolicyName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ParamRef != nil {
+		l = m.ParamRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForResourceRules += "}"
-	repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
-	for _, f := range this.ExcludeResourceRules {
-		repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+	if m.MatchResources != nil {
+		l = m.MatchResources.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForExcludeResourceRules += "}"
-	s := strings.Join([]string{`&MatchResources{`,
-		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`ResourceRules:` + repeatedStringForResourceRules + `,`,
-		`ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
-		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
-		`}`,
-	}, "")
-	return s
+	if len(m.ValidationActions) > 0 {
+		for _, s := range m.ValidationActions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
 }
-func (this *MutatingWebhook) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	repeatedStringForRules := "[]RuleWithOperations{"
-	for _, f := range this.Rules {
-		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	repeatedStringForRules += "}"
-	repeatedStringForMatchConditions := "[]MatchCondition{"
-	for _, f := range this.MatchConditions {
-		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForMatchConditions += "}"
-	s := strings.Join([]string{`&MutatingWebhook{`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
-		`Rules:` + repeatedStringForRules + `,`,
-		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
-		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
-		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
-		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
-		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
-		`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
-		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
-		`}`,
-	}, "")
-	return s
+	return n
 }
-func (this *MutatingWebhookConfiguration) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	repeatedStringForWebhooks := "[]MutatingWebhook{"
-	for _, f := range this.Webhooks {
-		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+	var l int
+	_ = l
+	if m.ParamKind != nil {
+		l = m.ParamKind.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForWebhooks += "}"
-	s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Webhooks:` + repeatedStringForWebhooks + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *MutatingWebhookConfigurationList) String() string {
-	if this == nil {
-		return "nil"
+	if m.MatchConstraints != nil {
+		l = m.MatchConstraints.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForItems := "[]MutatingWebhookConfiguration{"
-	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+	if len(m.Validations) > 0 {
+		for _, e := range m.Validations {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
-		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
-		`Items:` + repeatedStringForItems + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *NamedRuleWithOperations) String() string {
-	if this == nil {
-		return "nil"
+	if m.FailurePolicy != nil {
+		l = len(*m.FailurePolicy)
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	s := strings.Join([]string{`&NamedRuleWithOperations{`,
-		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
-		`RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ParamKind) String() string {
-	if this == nil {
-		return "nil"
+	if len(m.AuditAnnotations) > 0 {
+		for _, e := range m.AuditAnnotations {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	s := strings.Join([]string{`&ParamKind{`,
-		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
-		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ParamRef) String() string {
-	if this == nil {
-		return "nil"
+	if len(m.MatchConditions) > 0 {
+		for _, e := range m.MatchConditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	s := strings.Join([]string{`&ParamRef{`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
-		`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ServiceReference) String() string {
-	if this == nil {
-		return "nil"
+	if len(m.Variables) > 0 {
+		for _, e := range m.Variables {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	s := strings.Join([]string{`&ServiceReference{`,
-		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Path:` + valueToStringGenerated(this.Path) + `,`,
-		`Port:` + valueToStringGenerated(this.Port) + `,`,
-		`}`,
-	}, "")
-	return s
+	return n
 }
-func (this *TypeChecking) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
-	for _, f := range this.ExpressionWarnings {
-		repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+	if m.TypeChecking != nil {
+		l = m.TypeChecking.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForExpressionWarnings += "}"
-	s := strings.Join([]string{`&TypeChecking{`,
-		`ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ValidatingAdmissionPolicy) String() string {
-	if this == nil {
-		return "nil"
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
-		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
+	return n
 }
-func (this *ValidatingAdmissionPolicyBinding) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *ValidatingWebhook) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ValidatingAdmissionPolicyBindingList) String() string {
-	if this == nil {
-		return "nil"
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.ClientConfig.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
-	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+	if m.FailurePolicy != nil {
+		l = len(*m.FailurePolicy)
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
-		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
-		`Items:` + repeatedStringForItems + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
-	if this == nil {
-		return "nil"
+	if m.NamespaceSelector != nil {
+		l = m.NamespaceSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
-		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
-		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
-		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
-		`ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ValidatingAdmissionPolicyList) String() string {
-	if this == nil {
-		return "nil"
+	if m.SideEffects != nil {
+		l = len(*m.SideEffects)
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
-	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+	if m.TimeoutSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
 	}
-	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
-		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
-		`Items:` + repeatedStringForItems + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ValidatingAdmissionPolicySpec) String() string {
-	if this == nil {
-		return "nil"
+	if len(m.AdmissionReviewVersions) > 0 {
+		for _, s := range m.AdmissionReviewVersions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	repeatedStringForValidations := "[]Validation{"
-	for _, f := range this.Validations {
-		repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+	if m.MatchPolicy != nil {
+		l = len(*m.MatchPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForValidations += "}"
-	repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
-	for _, f := range this.AuditAnnotations {
-		repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+	if m.ObjectSelector != nil {
+		l = m.ObjectSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	repeatedStringForAuditAnnotations += "}"
-	repeatedStringForMatchConditions := "[]MatchCondition{"
-	for _, f := range this.MatchConditions {
-		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	if len(m.MatchConditions) > 0 {
+		for _, e := range m.MatchConditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
 	}
-	repeatedStringForMatchConditions += "}"
-	repeatedStringForVariables := "[]Variable{"
-	for _, f := range this.Variables {
-		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+	return n
+}
+
+func (m *ValidatingWebhookConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	repeatedStringForVariables += "}"
-	s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
-		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
-		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
-		`Validations:` + repeatedStringForValidations + `,`,
-		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
-		`AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
-		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
-		`Variables:` + repeatedStringForVariables + `,`,
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Webhooks) > 0 {
+		for _, e := range m.Webhooks {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ValidatingWebhookConfigurationList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Validation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Reason != nil {
+		l = len(*m.Reason)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.MessageExpression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Variable) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *WebhookClientConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Service != nil {
+		l = m.Service.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.CABundle != nil {
+		l = len(m.CABundle)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.URL != nil {
+		l = len(*m.URL)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ApplyConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ApplyConfiguration{`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *ValidatingAdmissionPolicyStatus) String() string {
+func (this *AuditAnnotation) String() string {
 	if this == nil {
 		return "nil"
 	}
-	repeatedStringForConditions := "[]Condition{"
-	for _, f := range this.Conditions {
-		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
-	}
-	repeatedStringForConditions += "}"
-	s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
-		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
-		`TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
-		`Conditions:` + repeatedStringForConditions + `,`,
+	s := strings.Join([]string{`&AuditAnnotation{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *ValidatingWebhook) String() string {
+func (this *ExpressionWarning) String() string {
 	if this == nil {
 		return "nil"
 	}
-	repeatedStringForRules := "[]RuleWithOperations{"
-	for _, f := range this.Rules {
-		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+	s := strings.Join([]string{`&ExpressionWarning{`,
+		`FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
+		`Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *JSONPatch) String() string {
+	if this == nil {
+		return "nil"
 	}
-	repeatedStringForRules += "}"
-	repeatedStringForMatchConditions := "[]MatchCondition{"
-	for _, f := range this.MatchConditions {
-		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	s := strings.Join([]string{`&JSONPatch{`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MatchCondition) String() string {
+	if this == nil {
+		return "nil"
 	}
-	repeatedStringForMatchConditions += "}"
-	s := strings.Join([]string{`&ValidatingWebhook{`,
+	s := strings.Join([]string{`&MatchCondition{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
-		`Rules:` + repeatedStringForRules + `,`,
-		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MatchResources) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
+	for _, f := range this.ResourceRules {
+		repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResourceRules += "}"
+	repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
+	for _, f := range this.ExcludeResourceRules {
+		repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForExcludeResourceRules += "}"
+	s := strings.Join([]string{`&MatchResources{`,
 		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
-		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
-		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
-		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
 		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
-		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+		`ResourceRules:` + repeatedStringForResourceRules + `,`,
+		`ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
+		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *ValidatingWebhookConfiguration) String() string {
+func (this *MutatingAdmissionPolicy) String() string {
 	if this == nil {
 		return "nil"
 	}
-	repeatedStringForWebhooks := "[]ValidatingWebhook{"
-	for _, f := range this.Webhooks {
-		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+	s := strings.Join([]string{`&MutatingAdmissionPolicy{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MutatingAdmissionPolicyBinding) String() string {
+	if this == nil {
+		return "nil"
 	}
-	repeatedStringForWebhooks += "}"
-	s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
+	s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`,
 		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Webhooks:` + repeatedStringForWebhooks + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *ValidatingWebhookConfigurationList) String() string {
+func (this *MutatingAdmissionPolicyBindingList) String() string {
 	if this == nil {
 		return "nil"
 	}
-	repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
+	repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{"
 	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
 	}
 	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
+	s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`,
 		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
 		`Items:` + repeatedStringForItems + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *Validation) String() string {
+func (this *MutatingAdmissionPolicyBindingSpec) String() string {
 	if this == nil {
 		return "nil"
 	}
-	s := strings.Join([]string{`&Validation{`,
-		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
-		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
-		`Reason:` + valueToStringGenerated(this.Reason) + `,`,
-		`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+	s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`,
+		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
 		`}`,
 	}, "")
 	return s
 }
-func (this *Variable) String() string {
+func (this *MutatingAdmissionPolicyList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]MutatingAdmissionPolicy{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&MutatingAdmissionPolicyList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MutatingAdmissionPolicySpec) String() string {
 	if this == nil {
 		return "nil"
 	}
-	s := strings.Join([]string{`&Variable{`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *WebhookClientConfig) String() string {
-	if this == nil {
-		return "nil"
+	repeatedStringForVariables := "[]Variable{"
+	for _, f := range this.Variables {
+		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVariables += "}"
+	repeatedStringForMutations := "[]Mutation{"
+	for _, f := range this.Mutations {
+		repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMutations += "}"
+	repeatedStringForMatchConditions := "[]MatchCondition{"
+	for _, f := range this.MatchConditions {
+		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchConditions += "}"
+	s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`,
+		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+		`Variables:` + repeatedStringForVariables + `,`,
+		`Mutations:` + repeatedStringForMutations + `,`,
+		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+		`ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MutatingWebhook) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRules := "[]RuleWithOperations{"
+	for _, f := range this.Rules {
+		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForRules += "}"
+	repeatedStringForMatchConditions := "[]MatchCondition{"
+	for _, f := range this.MatchConditions {
+		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchConditions += "}"
+	s := strings.Join([]string{`&MutatingWebhook{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
+		`Rules:` + repeatedStringForRules + `,`,
+		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
+		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
+		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+		`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
+		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MutatingWebhookConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForWebhooks := "[]MutatingWebhook{"
+	for _, f := range this.Webhooks {
+		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForWebhooks += "}"
+	s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Webhooks:` + repeatedStringForWebhooks + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MutatingWebhookConfigurationList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]MutatingWebhookConfiguration{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Mutation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Mutation{`,
+		`PatchType:` + fmt.Sprintf("%v", this.PatchType) + `,`,
+		`ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`,
+		`JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedRuleWithOperations) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedRuleWithOperations{`,
+		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+		`RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ParamKind) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ParamKind{`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ParamRef) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ParamRef{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceReference{`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Path:` + valueToStringGenerated(this.Path) + `,`,
+		`Port:` + valueToStringGenerated(this.Port) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TypeChecking) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
+	for _, f := range this.ExpressionWarnings {
+		repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForExpressionWarnings += "}"
+	s := strings.Join([]string{`&TypeChecking{`,
+		`ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicyBinding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicyBindingList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
+		`PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+		`ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+		`MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
+		`ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicyList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicySpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForValidations := "[]Validation{"
+	for _, f := range this.Validations {
+		repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForValidations += "}"
+	repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
+	for _, f := range this.AuditAnnotations {
+		repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForAuditAnnotations += "}"
+	repeatedStringForMatchConditions := "[]MatchCondition{"
+	for _, f := range this.MatchConditions {
+		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchConditions += "}"
+	repeatedStringForVariables := "[]Variable{"
+	for _, f := range this.Variables {
+		repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVariables += "}"
+	s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
+		`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+		`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+		`Validations:` + repeatedStringForValidations + `,`,
+		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+		`AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
+		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+		`Variables:` + repeatedStringForVariables + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingAdmissionPolicyStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]Condition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
+		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+		`TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingWebhook) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRules := "[]RuleWithOperations{"
+	for _, f := range this.Rules {
+		repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForRules += "}"
+	repeatedStringForMatchConditions := "[]MatchCondition{"
+	for _, f := range this.MatchConditions {
+		repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchConditions += "}"
+	s := strings.Join([]string{`&ValidatingWebhook{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
+		`Rules:` + repeatedStringForRules + `,`,
+		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
+		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
+		`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+		`ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingWebhookConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForWebhooks := "[]ValidatingWebhook{"
+	for _, f := range this.Webhooks {
+		repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForWebhooks += "}"
+	s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Webhooks:` + repeatedStringForWebhooks + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ValidatingWebhookConfigurationList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Validation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Validation{`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Reason:` + valueToStringGenerated(this.Reason) + `,`,
+		`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Variable) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Variable{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WebhookClientConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WebhookClientConfig{`,
+		`Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
+		`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+		`URL:` + valueToStringGenerated(this.URL) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ValueExpression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldRef = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Warning = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *JSONPatch) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MatchResources) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NamespaceSelector == nil {
+				m.NamespaceSelector = &v1.LabelSelector{}
+			}
+			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ObjectSelector == nil {
+				m.ObjectSelector = &v1.LabelSelector{}
+			}
+			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
+			if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
+			if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := MatchPolicyType(dAtA[iNdEx:postIndex])
+			m.MatchPolicy = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
 	}
-	s := strings.Join([]string{`&WebhookClientConfig{`,
-		`Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
-		`CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
-		`URL:` + valueToStringGenerated(this.URL) + `,`,
-		`}`,
-	}, "")
-	return s
+	return nil
 }
-func valueToStringGenerated(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
+func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
 	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
 }
-func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -3273,17 +5243,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3293,29 +5263,30 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Key = string(dAtA[iNdEx:postIndex])
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3325,23 +5296,25 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ValueExpression = string(dAtA[iNdEx:postIndex])
+			m.Items = append(m.Items, MutatingAdmissionPolicyBinding{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3364,7 +5337,7 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -3387,15 +5360,15 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
-		case 2:
+		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3423,13 +5396,49 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.FieldRef = string(dAtA[iNdEx:postIndex])
+			m.PolicyName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ParamRef == nil {
+				m.ParamRef = &ParamRef{}
+			}
+			if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3439,23 +5448,27 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Warning = string(dAtA[iNdEx:postIndex])
+			if m.MatchResources == nil {
+				m.MatchResources = &MatchResources{}
+			}
+			if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3478,7 +5491,7 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -3501,17 +5514,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+			return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3521,29 +5534,30 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = string(dAtA[iNdEx:postIndex])
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3553,23 +5567,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Expression = string(dAtA[iNdEx:postIndex])
+			m.Items = append(m.Items, MutatingAdmissionPolicy{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3592,7 +5608,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *MatchResources) Unmarshal(dAtA []byte) error {
+func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -3615,15 +5631,15 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+			return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -3650,16 +5666,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.NamespaceSelector == nil {
-				m.NamespaceSelector = &v1.LabelSelector{}
+			if m.ParamKind == nil {
+				m.ParamKind = &ParamKind{}
 			}
-			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -3686,16 +5702,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ObjectSelector == nil {
-				m.ObjectSelector = &v1.LabelSelector{}
+			if m.MatchConstraints == nil {
+				m.MatchConstraints = &MatchResources{}
 			}
-			if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -3722,14 +5738,14 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
-			if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.Variables = append(m.Variables, Variable{})
+			if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -3756,14 +5772,81 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
-			if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.Mutations = append(m.Mutations, Mutation{})
+			if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := FailurePolicyType(dAtA[iNdEx:postIndex])
+			m.FailurePolicy = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+			if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 7:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -3791,8 +5874,7 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			s := MatchPolicyType(dAtA[iNdEx:postIndex])
-			m.MatchPolicy = &s
+			m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -4160,7 +6242,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
+			s := k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex])
 			m.ReinvocationPolicy = &s
 			iNdEx = postIndex
 		case 11:
@@ -4488,6 +6570,160 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *Mutation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Mutation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PatchType = PatchType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ApplyConfiguration == nil {
+				m.ApplyConfiguration = &ApplyConfiguration{}
+			}
+			if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.JSONPatch == nil {
+				m.JSONPatch = &JSONPatch{}
+			}
+			if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
index 30f99f64d..fb47a2005 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "k8s.io/api/admissionregistration/v1beta1";
 
+// ApplyConfiguration defines the desired configuration values of an object.
+message ApplyConfiguration {
+  // expression will be evaluated by CEL to create an apply configuration.
+  // ref: https://github.com/google/cel-spec
+  //
+  // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+  // returns an apply configuration to set a single field:
+  //
+  // 	Object{
+  // 	  spec: Object.spec{
+  // 	    serviceAccountName: "example"
+  // 	  }
+  // 	}
+  //
+  // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+  // values not included in the apply configuration.
+  //
+  // CEL expressions have access to the object types needed to create apply configurations:
+  //
+  // - 'Object' - CEL type of the resource object.
+  // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+  // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+  //
+  // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+  //
+  // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+  // - 'oldObject' - The existing object. The value is null for CREATE requests.
+  // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+  // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+  // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+  // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+  //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+  // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+  //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+  // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+  //   request resource.
+  //
+  // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+  // object. No other metadata properties are accessible.
+  //
+  // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+  // Required.
+  optional string expression = 1;
+}
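For orientation while reviewing, a minimal sketch of the kind of apply-configuration CEL expression this field documents, held as a Go constant. The constant name and the field being set are illustrative, not part of this patch.

package main

import "fmt"

// exampleApplyExpression mirrors the Object-initializer form documented above:
// an apply configuration that sets a single field, spec.serviceAccountName.
const exampleApplyExpression = `Object{
  spec: Object.spec{
    serviceAccountName: "example"
  }
}`

func main() { fmt.Println(exampleApplyExpression) }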
+
 // AuditAnnotation describes how to produce an audit annotation for an API request.
 message AuditAnnotation {
   // key specifies the audit annotation key. The audit annotation keys of
@@ -79,6 +124,75 @@ message ExpressionWarning {
   optional string warning = 3;
 }
 
+// JSONPatch defines a JSON Patch.
+message JSONPatch {
+  // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+  // ref: https://github.com/google/cel-spec
+  //
+  // expression must return an array of JSONPatch values.
+  //
+  // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+  //
+  // 	  [
+  // 	    JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+  // 	    JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+  // 	  ]
+  //
+  // To define an object for the patch value, use Object types. For example:
+  //
+  // 	  [
+  // 	    JSONPatch{
+  // 	      op: "add",
+  // 	      path: "/spec/selector",
+  // 	      value: Object.spec.selector{matchLabels: {"environment": "test"}}
+  // 	    }
+  // 	  ]
+  //
+  // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+  //
+  // 	  [
+  // 	    JSONPatch{
+  // 	      op: "add",
+  // 	      path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+  // 	      value: "test"
+  // 	    },
+  // 	  ]
+  //
+  // CEL expressions have access to the types needed to create JSON patches and objects:
+  //
+  // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+  //   See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+  //   integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a
+  //   [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+  //   function may be used to escape path keys containing '/' and '~'.
+  // - 'Object' - CEL type of the resource object.
+  // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+  // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+  //
+  // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+  //
+  // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+  // - 'oldObject' - The existing object. The value is null for CREATE requests.
+  // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+  // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+  // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+  // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+  //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+  // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+  //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+  // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+  //   request resource.
+  //
+  // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+  // as well as:
+  //
+  // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+  //
+  // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+  // Required.
+  optional string expression = 1;
+}
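Similarly, a small sketch of a JSONPatch CEL expression of the documented shape, kept as a Go constant. The paths and values are made up for illustration.

package main

import "fmt"

// exampleJSONPatchExpression returns a list of JSONPatch values: a "test" op that
// guards on the current value, followed by a "replace" op, as in the comment above.
const exampleJSONPatchExpression = `[
  JSONPatch{op: "test", path: "/spec/example", value: "Red"},
  JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
]`

func main() { fmt.Println(exampleJSONPatchExpression) }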
+
 // MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
 message MatchCondition {
   // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@@ -203,6 +317,173 @@ message MatchResources {
   optional string matchPolicy = 7;
 }
 
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
+message MutatingAdmissionPolicy {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the MutatingAdmissionPolicy.
+  optional MutatingAdmissionPolicySpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+message MutatingAdmissionPolicyBinding {
+  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+  optional MutatingAdmissionPolicyBindingSpec spec = 2;
+}
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of MutatingAdmissionPolicyBinding.
+  repeated MutatingAdmissionPolicyBinding items = 2;
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+message MutatingAdmissionPolicyBindingSpec {
+  // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+  // If the referenced resource does not exist, this binding is considered invalid and will be ignored
+  // Required.
+  optional string policyName = 1;
+
+  // paramRef specifies the parameter resource used to configure the admission control policy.
+  // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+  // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+  // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+  // +optional
+  optional ParamRef paramRef = 2;
+
+  // matchResources limits what resources match this binding and may be mutated by it.
+  // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+  // matchConditions before the resource may be mutated.
+  // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+  // and matchConditions must match for the resource to be mutated.
+  // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+  // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+  // The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
+  // '*' matches CREATE, UPDATE and CONNECT.
+  // +optional
+  optional MatchResources matchResources = 3;
+}
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+message MutatingAdmissionPolicyList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of MutatingAdmissionPolicy.
+  repeated MutatingAdmissionPolicy items = 2;
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+message MutatingAdmissionPolicySpec {
+  // paramKind specifies the kind of resources used to parameterize this policy.
+  // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+  // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+  // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+  // +optional
+  optional ParamKind paramKind = 1;
+
+  // matchConstraints specifies what resources this policy is designed to validate.
+  // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+  // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
+  // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+  // The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
+  // '*' matches CREATE, UPDATE and CONNECT.
+  // Required.
+  optional MatchResources matchConstraints = 2;
+
+  // variables contain definitions of variables that can be used in composition of other expressions.
+  // Each variable is defined as a named CEL expression.
+  // The variables defined here will be available under `variables` in other expressions of the policy
+  // except matchConditions because matchConditions are evaluated before the rest of the policy.
+  //
+  // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+  // Thus, variables must be sorted by the order of first appearance and acyclic.
+  // +listType=atomic
+  // +optional
+  repeated Variable variables = 3;
+
+  // mutations contain operations to perform on matching objects.
+  // mutations may not be empty; a minimum of one mutation is required.
+  // mutations are evaluated in order, and are reinvoked according to
+  // the reinvocationPolicy.
+  // The mutations of a policy are invoked for each binding of this policy
+  // and reinvocation of mutations occurs on a per binding basis.
+  //
+  // +listType=atomic
+  // +optional
+  repeated Mutation mutations = 4;
+
+  // failurePolicy defines how to handle failures for the admission policy. Failures can
+  // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+  // or mis-configured policy definitions or bindings.
+  //
+  // A policy is invalid if paramKind refers to a non-existent Kind.
+  // A binding is invalid if paramRef.name refers to a non-existent resource.
+  //
+  // failurePolicy does not define how validations that evaluate to false are handled.
+  //
+  // Allowed values are Ignore or Fail. Defaults to Fail.
+  // +optional
+  optional string failurePolicy = 5;
+
+  // matchConditions is a list of conditions that must be met for a request to be validated.
+  // Match conditions filter requests that have already been matched by the matchConstraints.
+  // An empty list of matchConditions matches all requests.
+  // There are a maximum of 64 match conditions allowed.
+  //
+  // If a parameter object is provided, it can be accessed via the `params` handle in the same
+  // manner as validation expressions.
+  //
+  // The exact matching logic is (in order):
+  //   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+  //   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+  //   3. If any matchCondition evaluates to an error (but none are FALSE):
+  //      - If failurePolicy=Fail, reject the request
+  //      - If failurePolicy=Ignore, the policy is skipped
+  //
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  // +optional
+  repeated MatchCondition matchConditions = 6;
+
+  // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+  // as part of a single admission evaluation.
+  // Allowed values are "Never" and "IfNeeded".
+  //
+  // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+  //
+  // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+  // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only
+  // reinvoked when mutations change the object after this mutation is invoked.
+  // Required.
+  optional string reinvocationPolicy = 7;
+}
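To make the interplay of variables and matchConditions above concrete, a hedged sketch using the Go types added later in this patch. The variable name, condition name, and CEL expressions are invented for illustration.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	spec := admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
		// A composited variable, available as 'variables.teamLabel' in mutation expressions
		// (but not in matchConditions, which are evaluated before the rest of the policy).
		Variables: []admissionregistrationv1beta1.Variable{{
			Name:       "teamLabel",
			Expression: `has(object.metadata.labels) && "team" in object.metadata.labels ? object.metadata.labels["team"] : "unknown"`,
		}},
		// Skip the policy entirely for requests made by node identities.
		MatchConditions: []admissionregistrationv1beta1.MatchCondition{{
			Name:       "exclude-kubelet-requests",
			Expression: `!("system:nodes" in request.userInfo.groups)`,
		}},
	}
	fmt.Println(len(spec.Variables), len(spec.MatchConditions))
}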
+
 // MutatingWebhook describes an admission webhook and the resources and operations it applies to.
 message MutatingWebhook {
   // The name of the admission webhook.
@@ -401,6 +682,26 @@ message MutatingWebhookConfigurationList {
   repeated MutatingWebhookConfiguration items = 2;
 }
 
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+message Mutation {
+  // patchType indicates the patch strategy used.
+  // Allowed values are "ApplyConfiguration" and "JSONPatch".
+  // Required.
+  //
+  // +unionDiscriminator
+  optional string patchType = 2;
+
+  // applyConfiguration defines the desired configuration values of an object.
+  // The configuration is applied to the admission object using
+  // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+  // A CEL expression is used to create apply configuration.
+  optional ApplyConfiguration applyConfiguration = 3;
+
+  // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+  // A CEL expression is used to create the JSON patch.
+  optional JSONPatch jsonPatch = 4;
+}
+
 // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
 // +structType=atomic
 message NamedRuleWithOperations {
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
index 363233a2f..be64c4a5f 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
@@ -54,6 +54,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&ValidatingAdmissionPolicyList{},
 		&ValidatingAdmissionPolicyBinding{},
 		&ValidatingAdmissionPolicyBindingList{},
+		&MutatingAdmissionPolicy{},
+		&MutatingAdmissionPolicyList{},
+		&MutatingAdmissionPolicyBinding{},
+		&MutatingAdmissionPolicyBindingList{},
 	)
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
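A quick sketch of how a consumer could confirm the new kinds are wired into the scheme once this hunk is applied. It assumes only the AddToScheme and SchemeGroupVersion helpers already present in this package.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// With the addKnownTypes change above, the scheme can instantiate the new mutating policy kinds.
	gvk := admissionregistrationv1beta1.SchemeGroupVersion.WithKind("MutatingAdmissionPolicy")
	obj, err := scheme.New(gvk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1beta1.MutatingAdmissionPolicy
}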
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
index 0f5903123..cffdda82c 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -1073,7 +1073,7 @@ type MutatingWebhook struct {
 }
 
 // ReinvocationPolicyType specifies what type of policy the admission hook uses.
-type ReinvocationPolicyType string
+type ReinvocationPolicyType = v1.ReinvocationPolicyType
 
 const (
 	// NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
@@ -1197,3 +1197,332 @@ type MatchCondition struct {
 	// Required.
 	Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"`
 }
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
+type MutatingAdmissionPolicy struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Specification of the desired behavior of the MutatingAdmissionPolicy.
+	Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
+type MutatingAdmissionPolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// List of MutatingAdmissionPolicy.
+	Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
+type MutatingAdmissionPolicySpec struct {
+	// paramKind specifies the kind of resources used to parameterize this policy.
+	// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+	// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+	// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+	// +optional
+	ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
+
+	// matchConstraints specifies what resources this policy is designed to validate.
+	// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+	// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
+	// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+	// The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
+	// '*' matches CREATE, UPDATE and CONNECT.
+	// Required.
+	MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
+
+	// variables contain definitions of variables that can be used in composition of other expressions.
+	// Each variable is defined as a named CEL expression.
+	// The variables defined here will be available under `variables` in other expressions of the policy
+	// except matchConditions because matchConditions are evaluated before the rest of the policy.
+	//
+	// The expression of a variable can refer to other variables defined earlier in the list but not those after.
+	// Thus, variables must be sorted by the order of first appearance and acyclic.
+	// +listType=atomic
+	// +optional
+	Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
+
+	// mutations contain operations to perform on matching objects.
+	// mutations may not be empty; a minimum of one mutation is required.
+	// mutations are evaluated in order, and are reinvoked according to
+	// the reinvocationPolicy.
+	// The mutations of a policy are invoked for each binding of this policy
+	// and reinvocation of mutations occurs on a per binding basis.
+	//
+	// +listType=atomic
+	// +optional
+	Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
+
+	// failurePolicy defines how to handle failures for the admission policy. Failures can
+	// occur from CEL expression parse errors, type check errors, runtime errors and invalid
+	// or mis-configured policy definitions or bindings.
+	//
+	// A policy is invalid if paramKind refers to a non-existent Kind.
+	// A binding is invalid if paramRef.name refers to a non-existent resource.
+	//
+	// failurePolicy does not define how validations that evaluate to false are handled.
+	//
+	// Allowed values are Ignore or Fail. Defaults to Fail.
+	// +optional
+	FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
+
+	// matchConditions is a list of conditions that must be met for a request to be validated.
+	// Match conditions filter requests that have already been matched by the matchConstraints.
+	// An empty list of matchConditions matches all requests.
+	// There are a maximum of 64 match conditions allowed.
+	//
+	// If a parameter object is provided, it can be accessed via the `params` handle in the same
+	// manner as validation expressions.
+	//
+	// The exact matching logic is (in order):
+	//   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+	//   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+	//   3. If any matchCondition evaluates to an error (but none are FALSE):
+	//      - If failurePolicy=Fail, reject the request
+	//      - If failurePolicy=Ignore, the policy is skipped
+	//
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
+
+	// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+	// as part of a single admission evaluation.
+	// Allowed values are "Never" and "IfNeeded".
+	//
+	// Never: These mutations will not be called more than once per binding in a single admission evaluation.
+	//
+	// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+	// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only
+	// reinvoked when mutations change the object after this mutation is invoked.
+	// Required.
+	ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
+}
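Pulling the spec fields together, a hedged sketch of a complete MutatingAdmissionPolicy built from these new v1beta1 types. The policy name, target resource, and CEL expression are illustrative, not part of the patch.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	fail := admissionregistrationv1beta1.Fail

	policy := admissionregistrationv1beta1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "add-service-account"},
		Spec: admissionregistrationv1beta1.MutatingAdmissionPolicySpec{
			// Match CREATE of apps/v1 Deployments only.
			MatchConstraints: &admissionregistrationv1beta1.MatchResources{
				ResourceRules: []admissionregistrationv1beta1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1beta1.RuleWithOperations{
						Operations: []admissionregistrationv1beta1.OperationType{
							admissionregistrationv1beta1.Create,
						},
						Rule: admissionregistrationv1beta1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// A single apply-configuration mutation setting the pod template's serviceAccountName.
			Mutations: []admissionregistrationv1beta1.Mutation{{
				PatchType: admissionregistrationv1beta1.PatchTypeApplyConfiguration,
				ApplyConfiguration: &admissionregistrationv1beta1.ApplyConfiguration{
					Expression: `Object{spec: Object.spec{template: Object.spec.template{spec: Object.spec.template.spec{serviceAccountName: "example"}}}}`,
				},
			}},
			FailurePolicy:      &fail,
			ReinvocationPolicy: admissionregistrationv1beta1.NeverReinvocationPolicy,
		},
	}
	fmt.Println(policy.Name, policy.Spec.ReinvocationPolicy)
}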
+
+// Mutation specifies the CEL expression which is used to apply the Mutation.
+type Mutation struct {
+	// patchType indicates the patch strategy used.
+	// Allowed values are "ApplyConfiguration" and "JSONPatch".
+	// Required.
+	//
+	// +unionDiscriminator
+	PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
+
+	// applyConfiguration defines the desired configuration values of an object.
+	// The configuration is applied to the admission object using
+	// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+	// A CEL expression is used to create apply configuration.
+	ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
+
+	// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+	// A CEL expression is used to create the JSON patch.
+	JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
+}
+
+// PatchType specifies the type of patch operation for a mutation.
+// +enum
+type PatchType string
+
+const (
+	// ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
+	PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
+	// JSONPatch indicates that the object is mutated through JSON Patch.
+	PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// ApplyConfiguration defines the desired configuration values of an object.
+type ApplyConfiguration struct {
+	// expression will be evaluated by CEL to create an apply configuration.
+	// ref: https://github.com/google/cel-spec
+	//
+	// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+	// returns an apply configuration to set a single field:
+	//
+	//	Object{
+	//	  spec: Object.spec{
+	//	    serviceAccountName: "example"
+	//	  }
+	//	}
+	//
+	// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+	// values not included in the apply configuration.
+	//
+	// CEL expressions have access to the object types needed to create apply configurations:
+	//
+	// - 'Object' - CEL type of the resource object.
+	// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+	// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+	//
+	// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+	//
+	// - 'object' - The object from the incoming request. The value is null for DELETE requests.
+	// - 'oldObject' - The existing object. The value is null for CREATE requests.
+	// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+	// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+	// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+	// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+	//   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+	// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+	//   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+	// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+	//   request resource.
+	//
+	// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+	// object. No other metadata properties are accessible.
+	//
+	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+	// Required.
+	Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
+
+// JSONPatch defines a JSON Patch.
+type JSONPatch struct {
+	// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+	// ref: https://github.com/google/cel-spec
+	//
+	// expression must return an array of JSONPatch values.
+	//
+	// For example, this CEL expression returns a JSON patch to conditionally modify a value:
+	//
+	//	  [
+	//	    JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+	//	    JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+	//	  ]
+	//
+	// To define an object for the patch value, use Object types. For example:
+	//
+	//	  [
+	//	    JSONPatch{
+	//	      op: "add",
+	//	      path: "/spec/selector",
+	//	      value: Object.spec.selector{matchLabels: {"environment": "test"}}
+	//	    }
+	//	  ]
+	//
+	// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+	//
+	//	  [
+	//	    JSONPatch{
+	//	      op: "add",
+	//	      path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+	//	      value: "test"
+	//	    },
+	//	  ]
+	//
+	// CEL expressions have access to the types needed to create JSON patches and objects:
+	//
+	// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+	//   See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+	//   integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a
+	//   [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+	//   function may be used to escape path keys containing '/' and '~'.
+	// - 'Object' - CEL type of the resource object.
+	// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+	// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+	//
+	// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+	//
+	// - 'object' - The object from the incoming request. The value is null for DELETE requests.
+	// - 'oldObject' - The existing object. The value is null for CREATE requests.
+	// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+	// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+	// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+	// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+	//   For example, a variable named 'foo' can be accessed as 'variables.foo'.
+	// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+	//   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+	// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+	//   request resource.
+	//
+	// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+	// as well as:
+	//
+	// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+	//
+	// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+	// Required.
+	Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
+}
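And the JSONPatch flavour of a Mutation, wiring a CEL expression into the union type defined above. Again an illustrative sketch; the label key is made up.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	m := admissionregistrationv1beta1.Mutation{
		PatchType: admissionregistrationv1beta1.PatchTypeJSONPatch,
		JSONPatch: &admissionregistrationv1beta1.JSONPatch{
			// Adds a label whose key contains '/', escaped via jsonpatch.escapeKey as documented above.
			Expression: `[
  JSONPatch{
    op: "add",
    path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
    value: "test"
  }
]`,
		},
	}
	fmt.Println(m.PatchType)
}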
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+type MutatingAdmissionPolicyBinding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+	Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// List of MutatingAdmissionPolicyBinding.
+	Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
+type MutatingAdmissionPolicyBindingSpec struct {
+	// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+	// If the referenced resource does not exist, this binding is considered invalid and will be ignored
+	// Required.
+	PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
+
+	// paramRef specifies the parameter resource used to configure the admission control policy.
+	// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+	// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+	// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+	// +optional
+	ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
+
+	// matchResources limits what resources match this binding and may be mutated by it.
+	// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+	// matchConditions before the resource may be mutated.
+	// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+	// and matchConditions must match for the resource to be mutated.
+	// Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+	// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+	// The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched.
+	// '*' matches CREATE, UPDATE and CONNECT.
+	// +optional
+	MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
+}
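Finally, a sketch of a MutatingAdmissionPolicyBinding that selects the hypothetical policy above and narrows it with matchResources. Names and labels are illustrative; since that policy declares no ParamKind, paramRef is omitted.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	binding := admissionregistrationv1beta1.MutatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "add-service-account-binding"},
		Spec: admissionregistrationv1beta1.MutatingAdmissionPolicyBindingSpec{
			PolicyName: "add-service-account",
			// matchResources narrows the binding to namespaces labelled environment=test;
			// the policy's own matchConstraints and matchConditions must still match.
			MatchResources: &admissionregistrationv1beta1.MatchResources{
				NamespaceSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"environment": "test"},
				},
			},
		},
	}
	fmt.Println(binding.Name, "->", binding.Spec.PolicyName)
}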
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
index cc1509b53..1a97c9472 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
@@ -27,6 +27,15 @@ package v1beta1
 // Those methods can be generated by using hack/update-codegen.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ApplyConfiguration = map[string]string{
+	"":           "ApplyConfiguration defines the desired configuration values of an object.",
+	"expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t  spec: Object.spec{\n\t    serviceAccountName: \"example\"\n\t  }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (ApplyConfiguration) SwaggerDoc() map[string]string {
+	return map_ApplyConfiguration
+}
+
 var map_AuditAnnotation = map[string]string{
 	"":                "AuditAnnotation describes how to produce an audit annotation for an API request.",
 	"key":             "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
@@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
 	return map_ExpressionWarning
 }
 
+var map_JSONPatch = map[string]string{
+	"":           "JSONPatch defines a JSON Patch.",
+	"expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t  [\n\t    JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t    JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t  ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/spec/selector\",\n\t      value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t    }\n\t  ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t  [\n\t    JSONPatch{\n\t      op: \"add\",\n\t      path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t      value: \"test\"\n\t    },\n\t  ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n  See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n  integer, array, map or object.  If set, the 'path' and 'from' fields must be set to a\n  [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n  function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n  For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n  See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n  request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and  '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
+}
+
+func (JSONPatch) SwaggerDoc() map[string]string {
+	return map_JSONPatch
+}
+
 var map_MatchCondition = map[string]string{
 	"":           "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
 	"name":       "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName',  or 'my.name',  or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@@ -70,6 +88,72 @@ func (MatchResources) SwaggerDoc() map[string]string {
 	return map_MatchResources
 }
 
+var map_MutatingAdmissionPolicy = map[string]string{
+	"":         "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+	"spec":     "Specification of the desired behavior of the MutatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicy
+}
+
+var map_MutatingAdmissionPolicyBinding = map[string]string{
+	"":         "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
+	"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+	"spec":     "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicyBinding
+}
+
+var map_MutatingAdmissionPolicyBindingList = map[string]string{
+	"":         "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of PolicyBinding.",
+}
+
+func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicyBindingList
+}
+
+var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
+	"":               "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
+	"policyName":     "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
+	"paramRef":       "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
+	"matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
+}
+
+func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicyBindingSpec
+}
+
+var map_MutatingAdmissionPolicyList = map[string]string{
+	"":         "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of ValidatingAdmissionPolicy.",
+}
+
+func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicyList
+}
+
+var map_MutatingAdmissionPolicySpec = map[string]string{
+	"":                   "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
+	"paramKind":          "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
+	"matchConstraints":   "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed.  The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
+	"variables":          "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
+	"mutations":          "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
+	"failurePolicy":      "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
+	"matchConditions":    "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n  1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n  2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n  3. If any matchCondition evaluates to an error (but none are FALSE):\n     - If failurePolicy=Fail, reject the request\n     - If failurePolicy=Ignore, the policy is skipped",
+	"reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies.  Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
+}
+
+func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
+	return map_MutatingAdmissionPolicySpec
+}
+
 var map_MutatingWebhook = map[string]string{
 	"":                        "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
 	"name":                    "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -110,6 +194,17 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
 	return map_MutatingWebhookConfigurationList
 }
 
+var map_Mutation = map[string]string{
+	"":                   "Mutation specifies the CEL expression which is used to apply the Mutation.",
+	"patchType":          "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
+	"applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
+	"jsonPatch":          "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
+}
+
+func (Mutation) SwaggerDoc() map[string]string {
+	return map_Mutation
+}
+
 var map_NamedRuleWithOperations = map[string]string{
 	"":              "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
 	"resourceNames": "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
index 4c10b1d11..3749a3d14 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
@@ -27,6 +27,22 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
+func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ApplyConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
 	*out = *in
@@ -59,6 +75,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
+func (in *JSONPatch) DeepCopy() *JSONPatch {
+	if in == nil {
+		return nil
+	}
+	out := new(JSONPatch)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
 	*out = *in
@@ -120,6 +152,200 @@ func (in *MatchResources) DeepCopy() *MatchResources {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
+func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
+func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicyBinding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MutatingAdmissionPolicyBinding, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicyBindingList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
+	*out = *in
+	if in.ParamRef != nil {
+		in, out := &in.ParamRef, &out.ParamRef
+		*out = new(ParamRef)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MatchResources != nil {
+		in, out := &in.MatchResources, &out.MatchResources
+		*out = new(MatchResources)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
+func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicyBindingSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MutatingAdmissionPolicy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
+func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
+	*out = *in
+	if in.ParamKind != nil {
+		in, out := &in.ParamKind, &out.ParamKind
+		*out = new(ParamKind)
+		**out = **in
+	}
+	if in.MatchConstraints != nil {
+		in, out := &in.MatchConstraints, &out.MatchConstraints
+		*out = new(MatchResources)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Variables != nil {
+		in, out := &in.Variables, &out.Variables
+		*out = make([]Variable, len(*in))
+		copy(*out, *in)
+	}
+	if in.Mutations != nil {
+		in, out := &in.Mutations, &out.Mutations
+		*out = make([]Mutation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.FailurePolicy != nil {
+		in, out := &in.FailurePolicy, &out.FailurePolicy
+		*out = new(FailurePolicyType)
+		**out = **in
+	}
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]MatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
+func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MutatingAdmissionPolicySpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
 	*out = *in
@@ -168,7 +394,7 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
 	}
 	if in.ReinvocationPolicy != nil {
 		in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy
-		*out = new(ReinvocationPolicyType)
+		*out = new(admissionregistrationv1.ReinvocationPolicyType)
 		**out = **in
 	}
 	if in.MatchConditions != nil {
@@ -255,6 +481,32 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mutation) DeepCopyInto(out *Mutation) {
+	*out = *in
+	if in.ApplyConfiguration != nil {
+		in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
+		*out = new(ApplyConfiguration)
+		**out = **in
+	}
+	if in.JSONPatch != nil {
+		in, out := &in.JSONPatch, &out.JSONPatch
+		*out = new(JSONPatch)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
+func (in *Mutation) DeepCopy() *Mutation {
+	if in == nil {
+		return nil
+	}
+	out := new(Mutation)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
 	*out = *in
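The deepcopy helpers added above are generated boilerplate, but their intended use is worth keeping in mind when reviewing the rest of the patch: callers clone an object before mutating it so that shared caches are never modified in place. A small illustrative sketch, not part of the patch:

package main

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// cloneAndAppendMutation shows the intended use of the generated DeepCopy
// helpers: mutate a private copy so the original object (for example one read
// from an informer cache) is left untouched.
func cloneAndAppendMutation(orig *admissionregistrationv1beta1.MutatingAdmissionPolicy, m admissionregistrationv1beta1.Mutation) *admissionregistrationv1beta1.MutatingAdmissionPolicy {
	clone := orig.DeepCopy()
	// The clone owns its own Mutations slice, so appending here cannot
	// affect orig.Spec.Mutations.
	clone.Spec.Mutations = append(clone.Spec.Mutations, m)
	return clone
}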
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
index c1be5122a..4fc0596b3 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -25,6 +25,78 @@ import (
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 )
 
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
+
 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
 // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
 func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
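The generated APILifecycle* methods above encode the 1.34 introduction of the MutatingAdmissionPolicy types and the usual introduced+3 / introduced+6 minor deprecation and removal schedule. A hypothetical helper that consumes these methods (not part of the patch) could look like:

package main

// lifecycle is the subset of the generated prerelease-lifecycle methods
// needed to decide whether a type is still served.
type lifecycle interface {
	APILifecycleIntroduced() (major, minor int)
	APILifecycleRemoved() (major, minor int)
}

// servedIn reports whether an API type implementing the generated lifecycle
// methods is served by a cluster at the given version.
func servedIn(obj lifecycle, major, minor int) bool {
	iMaj, iMin := obj.APILifecycleIntroduced()
	rMaj, rMin := obj.APILifecycleRemoved()
	afterIntroduced := major > iMaj || (major == iMaj && minor >= iMin)
	beforeRemoved := major < rMaj || (major == rMaj && minor < rMin)
	return afterIntroduced && beforeRemoved
}

// Example: for v1beta1.MutatingAdmissionPolicy (introduced 1.34, removed 1.40),
// servedIn(&policy, 1, 36) is true and servedIn(&policy, 1, 40) is false.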
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index 38c8997e9..5885a6222 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -530,7 +530,7 @@ message RollingUpdateDaemonSet {
   // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
   // on that node is marked deleted. If the old pod becomes unavailable for any
   // reason (Ready transitions to false, is evicted, or is drained) an updated
-  // pod is immediatedly created on that node without considering surge limits.
+  // pod is immediately created on that node without considering surge limits.
   // Allowing surge implies the possibility that the resources consumed by the
   // daemonset on any given node can double if the readiness check fails, and
   // so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index 1362d875d..4cf54cc99 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -635,7 +635,7 @@ type RollingUpdateDaemonSet struct {
 	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
 	// on that node is marked deleted. If the old pod becomes unavailable for any
 	// reason (Ready transitions to false, is evicted, or is drained) an updated
-	// pod is immediatedly created on that node without considering surge limits.
+	// pod is immediately created on that node without considering surge limits.
 	// Allowing surge implies the possibility that the resources consumed by the
 	// daemonset on any given node can double if the readiness check fails, and
 	// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index f44ba7bc3..ac54033fd 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
 var map_RollingUpdateDaemonSet = map[string]string{
 	"":               "Spec to control the desired behavior of daemon set rolling update.",
 	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
-	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
+	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
 }
 
 func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
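Beyond the "immediatedly" typo fix, the maxSurge documentation above describes the surge-based DaemonSet update flow. For reference, a spec that opts into it looks like the following sketch (not part of the patch):

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// surgingUpdateStrategy allows up to 30% of nodes to run a surged (new)
// DaemonSet pod alongside the old one; per the docs above, maxUnavailable
// must then be 0, since the two fields cannot both be zero or both be set.
func surgingUpdateStrategy() appsv1.DaemonSetUpdateStrategy {
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromInt32(0)
	return appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxSurge:       &maxSurge,
			MaxUnavailable: &maxUnavailable,
		},
	}
}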
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
index 0601efc3c..b61dc490d 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -316,6 +316,9 @@ message Scale {
 message ScaleSpec {
   // replicas is the number of observed instances of the scaled object.
   // +optional
+  // +k8s:optional
+  // +default=0
+  // +k8s:minimum=0
   optional int32 replicas = 1;
 }
 
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
index 5530c990d..cd140be12 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -33,6 +33,9 @@ const (
 type ScaleSpec struct {
 	// replicas is the number of observed instances of the scaled object.
 	// +optional
+	// +k8s:optional
+	// +default=0
+	// +k8s:minimum=0
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 }
 
@@ -60,6 +63,7 @@ type ScaleStatus struct {
 // +k8s:prerelease-lifecycle-gen:deprecated=1.8
 // +k8s:prerelease-lifecycle-gen:removed=1.16
 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
+// +k8s:isSubresource=/scale
 
 // Scale represents a scaling request for a resource.
 type Scale struct {
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
index 68c463e25..37c6d5ae1 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -536,7 +536,7 @@ message RollingUpdateDaemonSet {
   // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
   // on that node is marked deleted. If the old pod becomes unavailable for any
   // reason (Ready transitions to false, is evicted, or is drained) an updated
-  // pod is immediatedly created on that node without considering surge limits.
+  // pod is immediately created on that node without considering surge limits.
   // Allowing surge implies the possibility that the resources consumed by the
   // daemonset on any given node can double if the readiness check fails, and
   // so resource intensive daemonsets should take into account that they may
@@ -614,6 +614,9 @@ message Scale {
 message ScaleSpec {
   // desired number of instances for the scaled object.
   // +optional
+  // +k8s:optional
+  // +default=0
+  // +k8s:minimum=0
   optional int32 replicas = 1;
 }
 
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
index 491afc59f..e9dc85df0 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -35,6 +35,9 @@ const (
 type ScaleSpec struct {
 	// desired number of instances for the scaled object.
 	// +optional
+	// +k8s:optional
+	// +default=0
+	// +k8s:minimum=0
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 }
 
@@ -63,6 +66,7 @@ type ScaleStatus struct {
 // +k8s:prerelease-lifecycle-gen:deprecated=1.9
 // +k8s:prerelease-lifecycle-gen:removed=1.16
 // +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
+// +k8s:isSubresource=/scale
 
 // Scale represents a scaling request for a resource.
 type Scale struct {
@@ -681,7 +685,7 @@ type RollingUpdateDaemonSet struct {
 	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
 	// on that node is marked deleted. If the old pod becomes unavailable for any
 	// reason (Ready transitions to false, is evicted, or is drained) an updated
-	// pod is immediatedly created on that node without considering surge limits.
+	// pod is immediately created on that node without considering surge limits.
 	// Allowing surge implies the possibility that the resources consumed by the
 	// daemonset on any given node can double if the readiness check fails, and
 	// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 408943415..34d80af58 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -265,7 +265,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
 var map_RollingUpdateDaemonSet = map[string]string{
 	"":               "Spec to control the desired behavior of daemon set rolling update.",
 	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
-	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
+	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.",
 }
 
 func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
index 37b05b855..ff529c969 100644
--- a/vendor/k8s.io/api/authorization/v1/generated.proto
+++ b/vendor/k8s.io/api/authorization/v1/generated.proto
@@ -167,16 +167,10 @@ message ResourceAttributes {
   optional string name = 7;
 
   // fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
-  //
-  // This field  is alpha-level. To use this field, you must enable the
-  // `AuthorizeWithSelectors` feature gate (disabled by default).
   // +optional
   optional FieldSelectorAttributes fieldSelector = 8;
 
   // labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
-  //
-  // This field  is alpha-level. To use this field, you must enable the
-  // `AuthorizeWithSelectors` feature gate (disabled by default).
   // +optional
   optional LabelSelectorAttributes labelSelector = 9;
 }
diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
index 36f5fa410..251e776b0 100644
--- a/vendor/k8s.io/api/authorization/v1/types.go
+++ b/vendor/k8s.io/api/authorization/v1/types.go
@@ -119,15 +119,9 @@ type ResourceAttributes struct {
 	// +optional
 	Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"`
 	// fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.
-	//
-	// This field  is alpha-level. To use this field, you must enable the
-	// `AuthorizeWithSelectors` feature gate (disabled by default).
 	// +optional
 	FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"`
 	// labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.
-	//
-	// This field  is alpha-level. To use this field, you must enable the
-	// `AuthorizeWithSelectors` feature gate (disabled by default).
 	// +optional
 	LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"`
 }
diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
index dc6b8a89e..29d0aa846 100644
--- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
@@ -87,8 +87,8 @@ var map_ResourceAttributes = map[string]string{
 	"resource":      "Resource is one of the existing resource types.  \"*\" means all.",
 	"subresource":   "Subresource is one of the existing resource types.  \"\" means none.",
 	"name":          "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.",
-	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
-	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.\n\nThis field  is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).",
+	"fieldSelector": "fieldSelector describes the limitation on access based on field.  It can only limit access, not broaden it.",
+	"labelSelector": "labelSelector describes the limitation on access based on labels.  It can only limit access, not broaden it.",
 }
 
 func (ResourceAttributes) SwaggerDoc() map[string]string {
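The hunks above drop the alpha-level / AuthorizeWithSelectors wording for fieldSelector and labelSelector in authorization/v1, reflecting their graduation in this release. A sketch of an access review that uses the field selector to narrow a check (the RawSelector field name is taken from the upstream API and is an assumption here, not confirmed by this patch):

package main

import (
	authorizationv1 "k8s.io/api/authorization/v1"
)

// canListPodsOnNode builds a SelfSubjectAccessReview asking whether the caller
// may list pods restricted to a single node. As the docs above note, the
// selector can only limit access, never broaden it. The object would be
// submitted with the AuthorizationV1 client's Create call.
func canListPodsOnNode(node string) *authorizationv1.SelfSubjectAccessReview {
	return &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:     "list",
				Resource: "pods",
				FieldSelector: &authorizationv1.FieldSelectorAttributes{
					RawSelector: "spec.nodeName=" + node,
				},
			},
		},
	}
}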
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
index 68c35b6b2..a17d7989d 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
@@ -472,6 +472,9 @@ message Scale {
 message ScaleSpec {
   // replicas is the desired number of instances for the scaled object.
   // +optional
+  // +k8s:optional
+  // +default=0
+  // +k8s:minimum=0
   optional int32 replicas = 1;
 }
 
diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
index 85c609e5c..e1e8809fe 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types.go
@@ -117,6 +117,7 @@ type HorizontalPodAutoscalerList struct {
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:prerelease-lifecycle-gen:introduced=1.2
+// +k8s:isSubresource=/scale
 
 // Scale represents a scaling request for a resource.
 type Scale struct {
@@ -138,6 +139,9 @@ type Scale struct {
 type ScaleSpec struct {
 	// replicas is the desired number of instances for the scaled object.
 	// +optional
+	// +k8s:optional
+	// +default=0
+	// +k8s:minimum=0
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 }
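The +k8s:isSubresource=/scale, +k8s:optional, +default=0 and +k8s:minimum=0 tags added above appear to feed the new declarative validation and codegen tooling rather than changing runtime behavior; the annotated ScaleSpec is the object driven through the /scale subresource. For context, a sketch of using that subresource with client-go (not part of the patch):

package main

import (
	"context"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment sets spec.replicas through the /scale subresource, which is
// the ScaleSpec the tags above describe (replicas optional, >= 0, default 0).
func scaleDeployment(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := cs.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec = autoscalingv1.ScaleSpec{Replicas: replicas}
	_, err = cs.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}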
 
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index d3aeae0ad..c0ce8cef2 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -226,7 +226,8 @@ message JobSpec {
   optional SuccessPolicy successPolicy = 16;
 
   // Specifies the number of retries before marking this job failed.
-  // Defaults to 6
+  // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
+  // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
   // +optional
   optional int32 backoffLimit = 7;
 
@@ -329,8 +330,6 @@ message JobSpec {
   //
   // When using podFailurePolicy, Failed is the the only allowed value.
   // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
-  // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
-  // This is on by default.
   // +optional
   optional string podReplacementPolicy = 14;
 
@@ -570,7 +569,7 @@ message PodFailurePolicyRule {
 message SuccessPolicy {
   // rules represents the list of alternative rules for the declaring the Jobs
   // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
-  // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
+  // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
   // The terminal state for such a Job has the "Complete" condition.
   // Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
   // other rules are ignored. At most 20 elements are allowed.
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index 6c0007c21..9183c073d 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -257,7 +257,7 @@ type PodFailurePolicy struct {
 type SuccessPolicy struct {
 	// rules represents the list of alternative rules for the declaring the Jobs
 	// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
-	// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
+	// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
 	// The terminal state for such a Job has the "Complete" condition.
 	// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
 	// other rules are ignored. At most 20 elements are allowed.
@@ -347,7 +347,8 @@ type JobSpec struct {
 	SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
 
 	// Specifies the number of retries before marking this job failed.
-	// Defaults to 6
+	// Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
+	// When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
 	// +optional
 	BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
 
@@ -455,8 +456,6 @@ type JobSpec struct {
 	//
 	// When using podFailurePolicy, Failed is the the only allowed value.
 	// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
-	// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
-	// This is on by default.
 	// +optional
 	PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
 
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index ffd4e4f5f..451f4609f 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -117,7 +117,7 @@ var map_JobSpec = map[string]string{
 	"activeDeadlineSeconds":   "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
 	"podFailurePolicy":        "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.",
 	"successPolicy":           "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.",
-	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6",
+	"backoffLimit":            "Specifies the number of retries before marking this job failed. Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified. When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.",
 	"backoffLimitPerIndex":    "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable.",
 	"maxFailedIndexes":        "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5.",
 	"selector":                "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
@@ -126,7 +126,7 @@ var map_JobSpec = map[string]string{
 	"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
 	"completionMode":          "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
 	"suspend":                 "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
-	"podReplacementPolicy":    "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n  when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n  Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
+	"podReplacementPolicy":    "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n  when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n  Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
 	"managedBy":               "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
 }
 
@@ -206,7 +206,7 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string {
 
 var map_SuccessPolicy = map[string]string{
 	"":      "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
-	"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
+	"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SuccessCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
 }
 
 func (SuccessPolicy) SwaggerDoc() map[string]string {
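The batch/v1 doc updates above clarify how backoffLimit defaults when backoffLimitPerIndex is set and correct the condition name to "SuccessCriteriaMet". A sketch of an Indexed Job spec using both knobs (not part of the patch; the ptr helper and SuccessPolicyRule field names are assumed from upstream):

package main

import (
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

// indexedJobSpec relies on per-index backoff, so backoffLimit is left unset
// and, per the updated docs above, defaults to 2147483647. The success policy
// adds the "SuccessCriteriaMet" condition once index 0 succeeds.
func indexedJobSpec() batchv1.JobSpec {
	return batchv1.JobSpec{
		Completions:          ptr.To[int32](10),
		Parallelism:          ptr.To[int32](5),
		CompletionMode:       ptr.To(batchv1.IndexedCompletion),
		BackoffLimitPerIndex: ptr.To[int32](2),
		SuccessPolicy: &batchv1.SuccessPolicy{
			Rules: []batchv1.SuccessPolicyRule{{
				SucceededIndexes: ptr.To("0"),
			}},
		},
		// Template (a pod spec with RestartPolicy=Never, as required for
		// backoffLimitPerIndex) is omitted for brevity.
	}
}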
diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto
index dac7c7f5f..24528fc8b 100644
--- a/vendor/k8s.io/api/certificates/v1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1/generated.proto
@@ -39,6 +39,8 @@ option go_package = "k8s.io/api/certificates/v1";
 // This API can be used to request client certificates to authenticate to kube-apiserver
 // (with the "kubernetes.io/kube-apiserver-client" signerName),
 // or to obtain certificates from custom non-Kubernetes signers.
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
 message CertificateSigningRequest {
   // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -203,6 +205,11 @@ message CertificateSigningRequestStatus {
   // +listType=map
   // +listMapKey=type
   // +optional
+  // +k8s:listType=map
+  // +k8s:listMapKey=type
+  // +k8s:optional
+  // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+  // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
   repeated CertificateSigningRequestCondition conditions = 1;
 
   // certificate is populated with an issued certificate by the signer after an Approved condition is present.
diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go
index ba8009840..71203e80d 100644
--- a/vendor/k8s.io/api/certificates/v1/types.go
+++ b/vendor/k8s.io/api/certificates/v1/types.go
@@ -39,6 +39,8 @@ import (
 // This API can be used to request client certificates to authenticate to kube-apiserver
 // (with the "kubernetes.io/kube-apiserver-client" signerName),
 // or to obtain certificates from custom non-Kubernetes signers.
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
 type CertificateSigningRequest struct {
 	metav1.TypeMeta `json:",inline"`
 	// +optional
@@ -178,6 +180,11 @@ type CertificateSigningRequestStatus struct {
 	// +listType=map
 	// +listMapKey=type
 	// +optional
+	// +k8s:listType=map
+	// +k8s:listMapKey=type
+	// +k8s:optional
+	// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+	// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
 	Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
 
 	// certificate is populated with an issued certificate by the signer after an Approved condition is present.
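The certificates/v1 hunks above tag the /status and /approval subresources and mark the Approved/Denied conditions as zero-or-one-of. A sketch of approving a CSR through the approval subresource, which is what those tags describe (not part of the patch):

package main

import (
	"context"

	certificatesv1 "k8s.io/api/certificates/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// approveCSR adds the Approved condition (mutually exclusive with Denied, as
// the zeroOrOneOfMember tags above express) and writes it via /approval.
func approveCSR(ctx context.Context, cs kubernetes.Interface, csr *certificatesv1.CertificateSigningRequest) error {
	csr = csr.DeepCopy()
	csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{
		Type:           certificatesv1.CertificateApproved,
		Status:         corev1.ConditionTrue,
		Reason:         "AutoApproved",
		Message:        "approved by example controller",
		LastUpdateTime: metav1.Now(),
	})
	_, err := cs.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{})
	return err
}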
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
index a62a40059..c260f0436 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
@@ -25,11 +25,14 @@ import (
 	io "io"
 
 	proto "github.com/gogo/protobuf/proto"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	math "math"
 	math_bits "math/bits"
 	reflect "reflect"
 	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -127,10 +130,126 @@ func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
 
+func (m *PodCertificateRequest) Reset()      { *m = PodCertificateRequest{} }
+func (*PodCertificateRequest) ProtoMessage() {}
+func (*PodCertificateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f73d5fe56c015bb8, []int{3}
+}
+func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateRequest.Merge(m, src)
+}
+func (m *PodCertificateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo
+
+func (m *PodCertificateRequestList) Reset()      { *m = PodCertificateRequestList{} }
+func (*PodCertificateRequestList) ProtoMessage() {}
+func (*PodCertificateRequestList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f73d5fe56c015bb8, []int{4}
+}
+func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateRequestList.Merge(m, src)
+}
+func (m *PodCertificateRequestList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateRequestList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo
+
+func (m *PodCertificateRequestSpec) Reset()      { *m = PodCertificateRequestSpec{} }
+func (*PodCertificateRequestSpec) ProtoMessage() {}
+func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f73d5fe56c015bb8, []int{5}
+}
+func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src)
+}
+func (m *PodCertificateRequestSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo
+
+func (m *PodCertificateRequestStatus) Reset()      { *m = PodCertificateRequestStatus{} }
+func (*PodCertificateRequestStatus) ProtoMessage() {}
+func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f73d5fe56c015bb8, []int{6}
+}
+func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src)
+}
+func (m *PodCertificateRequestStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo
+
 func init() {
 	proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle")
 	proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList")
 	proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec")
+	proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest")
+	proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList")
+	proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec")
+	proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus")
 }
 
 func init() {
@@ -138,35 +257,65 @@ func init() {
 }
 
 var fileDescriptor_f73d5fe56c015bb8 = []byte{
-	// 437 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40,
-	0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9,
-	0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae,
-	0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f,
-	0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a,
-	0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c,
-	0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02,
-	0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26,
-	0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c,
-	0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9,
-	0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a,
-	0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14,
-	0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c,
-	0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47,
-	0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a,
-	0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50,
-	0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57,
-	0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55,
-	0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46,
-	0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f,
-	0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8,
-	0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 0x74, 0x38, 0xc2,
-	0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89,
-	0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34,
-	0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c,
-	0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6,
-	0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb,
-	0xdd, 0x99, 0x03, 0x00, 0x00,
+	// 918 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44,
+	0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07,
+	0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a,
+	0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37,
+	0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2,
+	0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb,
+	0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0,
+	0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07,
+	0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa,
+	0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61,
+	0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68,
+	0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2,
+	0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4,
+	0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05,
+	0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65,
+	0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b,
+	0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11,
+	0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14,
+	0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0,
+	0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d,
+	0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49,
+	0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee,
+	0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b,
+	0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20,
+	0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c,
+	0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0,
+	0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18,
+	0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25,
+	0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78,
+	0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c,
+	0xef, 0xdf, 0xb0, 0xc3, 0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1,
+	0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f,
+	0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf,
+	0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d,
+	0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42,
+	0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b,
+	0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c,
+	0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a,
+	0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1,
+	0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8,
+	0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6,
+	0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27,
+	0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17,
+	0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0,
+	0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38,
+	0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf,
+	0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91,
+	0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1,
+	0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a,
+	0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5,
+	0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64,
+	0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f,
+	0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc,
+	0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03,
+	0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42,
+	0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf,
+	0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c,
+	0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00,
 }
 
 func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
@@ -292,6 +441,261 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
 	return len(dAtA) - i, nil
 }
 
+func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ProofOfPossession != nil {
+		i -= len(m.ProofOfPossession)
+		copy(dAtA[i:], m.ProofOfPossession)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession)))
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.PKIXPublicKey != nil {
+		i -= len(m.PKIXPublicKey)
+		copy(dAtA[i:], m.PKIXPublicKey)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.MaxExpirationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
+		i--
+		dAtA[i] = 0x40
+	}
+	i -= len(m.NodeUID)
+	copy(dAtA[i:], m.NodeUID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.NodeName)
+	copy(dAtA[i:], m.NodeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.ServiceAccountUID)
+	copy(dAtA[i:], m.ServiceAccountUID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.ServiceAccountName)
+	copy(dAtA[i:], m.ServiceAccountName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.PodUID)
+	copy(dAtA[i:], m.PodUID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.PodName)
+	copy(dAtA[i:], m.PodName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SignerName)
+	copy(dAtA[i:], m.SignerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NotAfter != nil {
+		{
+			size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.BeginRefreshAt != nil {
+		{
+			size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.NotBefore != nil {
+		{
+			size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.CertificateChain)
+	copy(dAtA[i:], m.CertificateChain)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
 func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
 	offset -= sovGenerated(v)
 	base := offset
@@ -346,25 +750,120 @@ func (m *ClusterTrustBundleSpec) Size() (n int) {
 	return n
 }
 
-func sovGenerated(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
-	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *ClusterTrustBundle) String() string {
-	if this == nil {
-		return "nil"
+func (m *PodCertificateRequest) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&ClusterTrustBundle{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ClusterTrustBundleList) String() string {
-	if this == nil {
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodCertificateRequestList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodCertificateRequestSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SignerName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.PodName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.PodUID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ServiceAccountName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ServiceAccountUID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.NodeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.NodeUID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.MaxExpirationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
+	}
+	if m.PKIXPublicKey != nil {
+		l = len(m.PKIXPublicKey)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ProofOfPossession != nil {
+		l = len(m.ProofOfPossession)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodCertificateRequestStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.CertificateChain)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.NotBefore != nil {
+		l = m.NotBefore.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.BeginRefreshAt != nil {
+		l = m.BeginRefreshAt.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NotAfter != nil {
+		l = m.NotAfter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ClusterTrustBundle) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ClusterTrustBundle{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterTrustBundleList) String() string {
+	if this == nil {
 		return "nil"
 	}
 	repeatedStringForItems := "[]ClusterTrustBundle{"
@@ -390,6 +889,72 @@ func (this *ClusterTrustBundleSpec) String() string {
 	}, "")
 	return s
 }
+func (this *PodCertificateRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodCertificateRequest{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodCertificateRequestList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PodCertificateRequest{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PodCertificateRequestList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodCertificateRequestSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodCertificateRequestSpec{`,
+		`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+		`PodName:` + fmt.Sprintf("%v", this.PodName) + `,`,
+		`PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`,
+		`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+		`ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`,
+		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+		`NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`,
+		`MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
+		`PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`,
+		`ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodCertificateRequestStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]Condition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&PodCertificateRequestStatus{`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`,
+		`NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`,
+		`BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`,
+		`NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringGenerated(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -745,6 +1310,858 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, PodCertificateRequest{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SignerName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.MaxExpirationSeconds = &v
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...)
+			if m.PKIXPublicKey == nil {
+				m.PKIXPublicKey = []byte{}
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...)
+			if m.ProofOfPossession == nil {
+				m.ProofOfPossession = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, v1.Condition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CertificateChain = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NotBefore == nil {
+				m.NotBefore = &v1.Time{}
+			}
+			if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BeginRefreshAt == nil {
+				m.BeginRefreshAt = &v1.Time{}
+			}
+			if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NotAfter == nil {
+				m.NotAfter = &v1.Time{}
+			}
+			if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
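
The generated Marshal and Size helpers above length-prefix every embedded message with a protobuf varint, and sovGenerated computes the number of bytes that varint occupies as (math_bits.Len64(x|1) + 6) / 7, i.e. ceil(bitlen/7) with a minimum of one byte. A minimal standalone sketch (not part of the vendored code) that cross-checks the formula against the standard library's varint encoder, which uses the same base-128 wire format:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// sovGenerated mirrors the generated helper: a varint stores 7 payload bits
// per byte, and x|1 guarantees at least one byte for x == 0.
func sovGenerated(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 35} {
		n := binary.PutUvarint(buf, x) // stdlib base-128 varint encoder
		fmt.Printf("x=%d formula=%d stdlib=%d\n", x, sovGenerated(x), n)
	}
}
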
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
index 7155f778c..194bdbc14 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
@@ -101,3 +101,208 @@ message ClusterTrustBundleSpec {
   optional string trustBundle = 2;
 }
 
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+message PodCertificateRequest {
+  // metadata contains the object metadata.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec contains the details about the certificate being requested.
+  optional PodCertificateRequestSpec spec = 2;
+
+  // status contains the issued certificate, and a standard set of conditions.
+  // +optional
+  optional PodCertificateRequestStatus status = 3;
+}
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+message PodCertificateRequestList {
+  // metadata contains the list metadata.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a collection of PodCertificateRequest objects
+  repeated PodCertificateRequest items = 2;
+}
+
+// PodCertificateRequestSpec describes the certificate request.  All fields are
+// immutable after creation.
+message PodCertificateRequestSpec {
+  // signerName indicates the requested signer.
+  //
+  // All signer names beginning with `kubernetes.io` are reserved for use by
+  // the Kubernetes project.  There is currently one well-known signer
+  // documented by the Kubernetes project,
+  // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+  // certificates understood by kube-apiserver.  It is currently
+  // unimplemented.
+  //
+  // +required
+  optional string signerName = 1;
+
+  // podName is the name of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podName = 2;
+
+  // podUID is the UID of the pod into which the certificate will be mounted.
+  //
+  // +required
+  optional string podUID = 3;
+
+  // serviceAccountName is the name of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountName = 4;
+
+  // serviceAccountUID is the UID of the service account the pod is running as.
+  //
+  // +required
+  optional string serviceAccountUID = 5;
+
+  // nodeName is the name of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeName = 6;
+
+  // nodeUID is the UID of the node the pod is assigned to.
+  //
+  // +required
+  optional string nodeUID = 7;
+
+  // maxExpirationSeconds is the maximum lifetime permitted for the
+  // certificate.
+  //
+  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+  // will reject values shorter than 3600 (1 hour).  The maximum allowable
+  // value is 7862400 (91 days).
+  //
+  // The signer implementation is then free to issue a certificate with any
+  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+  // seconds (1 hour).  This constraint is enforced by kube-apiserver.
+  // `kubernetes.io` signers will never issue certificates with a lifetime
+  // longer than 24 hours.
+  //
+  // +optional
+  // +default=86400
+  optional int32 maxExpirationSeconds = 8;
+
+  // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+  // certificate to.
+  //
+  // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+  // or ED25519. Note that this list may be expanded in the future.
+  //
+  // Signer implementations do not need to support all key types supported by
+  // kube-apiserver and kubelet.  If a signer does not support the key type
+  // used for a given PodCertificateRequest, it must deny the request by
+  // setting a status.conditions entry with a type of "Denied" and a reason of
+  // "UnsupportedKeyType". It may also suggest a key type that it does support
+  // in the message field.
+  //
+  // +required
+  optional bytes pkixPublicKey = 9;
+
+  // proofOfPossession proves that the requesting kubelet holds the private
+  // key corresponding to pkixPublicKey.
+  //
+  // It is constructed by signing the ASCII bytes of the pod's UID with the
+  // private key corresponding to `pkixPublicKey`.
+  //
+  // kube-apiserver validates the proof of possession during creation of the
+  // PodCertificateRequest.
+  //
+  // If the key is an RSA key, then the signature is over the ASCII bytes of
+  // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+  // function crypto/rsa.SignPSS with nil options).
+  //
+  // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+  // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+  // golang library function crypto/ecdsa.SignASN1).
+  //
+  // If the key is an ED25519 key, then the signature is as described by the
+  // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+  // the golang library crypto/ed25519.Sign).
+  //
+  // +required
+  optional bytes proofOfPossession = 10;
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+message PodCertificateRequestStatus {
+  // conditions applied to the request.
+  //
+  // The types "Issued", "Denied", and "Failed" have special handling.  At
+  // most one of these conditions may be present, and they must have status
+  // "True".
+  //
+  // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+  // suggest a key type that will work in the message field.
+  //
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  // +optional
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+
+  // certificateChain is populated with an issued certificate by the signer.
+  // This field is set via the /status subresource. Once populated, this field
+  // is immutable.
+  //
+  // If the certificate signing request is denied, a condition of type
+  // "Denied" is added and this field remains empty. If the signer cannot
+  // issue the certificate, a condition of type "Failed" is added and this
+  // field remains empty.
+  //
+  // Validation requirements:
+  //  1. certificateChain must consist of one or more PEM-formatted certificates.
+  //  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+  //     described in section 4 of RFC5280.
+  //
+  // If more than one block is present, and the definition of the requested
+  // spec.signerName does not indicate otherwise, the first block is the
+  // issued certificate, and subsequent blocks should be treated as
+  // intermediate certificates and presented in TLS handshakes.  When
+  // projecting the chain into a pod volume, kubelet will drop any data
+  // in-between the PEM blocks, as well as any PEM block headers.
+  //
+  // +optional
+  optional string certificateChain = 2;
+
+  // notBefore is the time at which the certificate becomes valid.  The value
+  // must be the same as the notBefore value in the leaf certificate in
+  // certificateChain.  This field is set via the /status subresource.  Once
+  // populated, it is immutable. The signer must set this field at the same
+  // time it sets certificateChain.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
+
+  // beginRefreshAt is the time at which the kubelet should begin trying to
+  // refresh the certificate.  This field is set via the /status subresource,
+  // and must be set at the same time as certificateChain.  Once populated,
+  // this field is immutable.
+  //
+  // This field is only a hint.  Kubelet may start refreshing before or after
+  // this time if necessary.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
+
+  // notAfter is the time at which the certificate expires.  The value must be
+  // the same as the notAfter value in the leaf certificate in
+  // certificateChain.  This field is set via the /status subresource.  Once
+  // populated, it is immutable.  The signer must set this field at the same
+  // time it sets certificateChain.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
+}
+
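
The proofOfPossession contract documented above is concrete enough to sketch from the requesting side. Assuming an ED25519 key pair (one of the permitted key types), a client would populate spec.pkixPublicKey and spec.proofOfPossession roughly as follows; this is an illustrative sketch using only the Go standard library, not code from the vendored packages, and the pod UID value is a placeholder:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	// Placeholder pod UID; in a real request this comes from the Pod object.
	podUID := "0f1d2c3b-4a59-6877-8695-a4b3c2d1e0ff"

	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// spec.pkixPublicKey: the PKIX (SubjectPublicKeyInfo) serialization of the
	// public key the certificate will be issued to.
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// spec.proofOfPossession: for an ED25519 key, a plain Ed25519 signature
	// over the ASCII bytes of the pod UID, per the field documentation.
	proofOfPossession := ed25519.Sign(priv, []byte(podUID))

	fmt.Printf("pkixPublicKey: %d bytes, proofOfPossession: %d bytes\n",
		len(pkixPublicKey), len(proofOfPossession))
}
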
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go
index 7288ed9a3..ae541e15c 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/register.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go
@@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
 		&ClusterTrustBundle{},
 		&ClusterTrustBundleList{},
+		&PodCertificateRequest{},
+		&PodCertificateRequestList{},
 	)
 
 	// Add the watch version that applies
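
For context on how the two new kinds become decodable: k8s.io/api packages conventionally wrap addKnownTypes in a SchemeBuilder and export AddToScheme. Assuming that export exists in this package (it is outside the hunk above), a consumer could register and resolve the new kind as in this sketch:

package main

import (
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is the conventional export that calls addKnownTypes.
	if err := certsv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// certificates.k8s.io/v1alpha1 is the group/version registered above.
	gvk := schema.GroupVersionKind{
		Group:   "certificates.k8s.io",
		Version: "v1alpha1",
		Kind:    "PodCertificateRequest",
	}
	obj, err := scheme.New(gvk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("kind resolves to %T\n", obj)
}
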
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go
index beef02599..a5cb3809e 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/types.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go
@@ -18,6 +18,7 @@ package v1alpha1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 )
 
 // +genclient
@@ -106,3 +107,233 @@ type ClusterTrustBundleList struct {
 	// items is a collection of ClusterTrustBundle objects
 	Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
+
+// +genclient
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+type PodCertificateRequest struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata contains the object metadata.
+	//
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec contains the details about the certificate being requested.
+	Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// status contains the issued certificate, and a standard set of conditions.
+	// +optional
+	Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodCertificateRequestSpec describes the certificate request.  All fields are
+// immutable after creation.
+type PodCertificateRequestSpec struct {
+	// signerName indicates the requested signer.
+	//
+	// All signer names beginning with `kubernetes.io` are reserved for use by
+	// the Kubernetes project.  There is currently one well-known signer
+	// documented by the Kubernetes project,
+	// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+	// certificates understood by kube-apiserver.  It is currently
+	// unimplemented.
+	//
+	// +required
+	SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
+
+	// podName is the name of the pod into which the certificate will be mounted.
+	//
+	// +required
+	PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
+	// podUID is the UID of the pod into which the certificate will be mounted.
+	//
+	// +required
+	PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
+
+	// serviceAccountName is the name of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
+	// serviceAccountUID is the UID of the service account the pod is running as.
+	//
+	// +required
+	ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
+
+	// nodeName is the name of the node the pod is assigned to.
+	//
+	// +required
+	NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
+	// nodeUID is the UID of the node the pod is assigned to.
+	//
+	// +required
+	NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
+
+	// maxExpirationSeconds is the maximum lifetime permitted for the
+	// certificate.
+	//
+	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+	// will reject values shorter than 3600 (1 hour).  The maximum allowable
+	// value is 7862400 (91 days).
+	//
+	// The signer implementation is then free to issue a certificate with any
+	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+	// seconds (1 hour).  This constraint is enforced by kube-apiserver.
+	// `kubernetes.io` signers will never issue certificates with a lifetime
+	// longer than 24 hours.
+	//
+	// +optional
+	// +default=86400
+	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
+
+	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
+	// certificate to.
+	//
+	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+	// or ED25519. Note that this list may be expanded in the future.
+	//
+	// Signer implementations do not need to support all key types supported by
+	// kube-apiserver and kubelet.  If a signer does not support the key type
+	// used for a given PodCertificateRequest, it must deny the request by
+	// setting a status.conditions entry with a type of "Denied" and a reason of
+	// "UnsupportedKeyType". It may also suggest a key type that it does support
+	// in the message field.
+	//
+	// +required
+	PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
+
+	// proofOfPossession proves that the requesting kubelet holds the private
+	// key corresponding to pkixPublicKey.
+	//
+	// It is constructed by signing the ASCII bytes of the pod's UID with the
+	// private key corresponding to `pkixPublicKey`.
+	//
+	// kube-apiserver validates the proof of possession during creation of the
+	// PodCertificateRequest.
+	//
+	// If the key is an RSA key, then the signature is over the ASCII bytes of
+	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+	// function crypto/rsa.SignPSS with nil options).
+	//
+	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
+	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+	// golang library function crypto/ecdsa.SignASN1).
+	//
+	// If the key is an ED25519 key, then the signature is as described by the
+	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+	// the golang library crypto/ed25519.Sign).
+	//
+	// +required
+	ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+type PodCertificateRequestStatus struct {
+	// conditions applied to the request.
+	//
+	// The types "Issued", "Denied", and "Failed" have special handling.  At
+	// most one of these conditions may be present, and they must have status
+	// "True".
+	//
+	// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+	// suggest a key type that will work in the message field.
+	//
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+	// certificateChain is populated with an issued certificate by the signer.
+	// This field is set via the /status subresource. Once populated, this field
+	// is immutable.
+	//
+	// If the certificate signing request is denied, a condition of type
+	// "Denied" is added and this field remains empty. If the signer cannot
+	// issue the certificate, a condition of type "Failed" is added and this
+	// field remains empty.
+	//
+	// Validation requirements:
+	//  1. certificateChain must consist of one or more PEM-formatted certificates.
+	//  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+	//     described in section 4 of RFC5280.
+	//
+	// If more than one block is present, and the definition of the requested
+	// spec.signerName does not indicate otherwise, the first block is the
+	// issued certificate, and subsequent blocks should be treated as
+	// intermediate certificates and presented in TLS handshakes.  When
+	// projecting the chain into a pod volume, kubelet will drop any data
+	// in-between the PEM blocks, as well as any PEM block headers.
+	//
+	// +optional
+	CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
+
+	// notBefore is the time at which the certificate becomes valid.  The value
+	// must be the same as the notBefore value in the leaf certificate in
+	// certificateChain.  This field is set via the /status subresource.  Once
+	// populated, it is immutable. The signer must set this field at the same
+	// time it sets certificateChain.
+	//
+	// +optional
+	NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
+
+	// beginRefreshAt is the time at which the kubelet should begin trying to
+	// refresh the certificate.  This field is set via the /status subresource,
+	// and must be set at the same time as certificateChain.  Once populated,
+	// this field is immutable.
+	//
+	// This field is only a hint.  Kubelet may start refreshing before or after
+	// this time if necessary.
+	//
+	// +optional
+	BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
+
+	// notAfter is the time at which the certificate expires.  The value must be
+	// the same as the notAfter value in the leaf certificate in
+	// certificateChain.  This field is set via the /status subresource.  Once
+	// populated, it is immutable.  The signer must set this field at the same
+	// time it sets certificateChain.
+	//
+	// +optional
+	NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
+}
+
+// Well-known condition types for PodCertificateRequests
+const (
+	// Denied indicates the request was denied by the signer.
+	PodCertificateRequestConditionTypeDenied string = "Denied"
+	// Failed indicates the signer failed to issue the certificate.
+	PodCertificateRequestConditionTypeFailed string = "Failed"
+	// Issued indicates the certificate has been issued.
+	PodCertificateRequestConditionTypeIssued string = "Issued"
+)
+
+// Well-known condition reasons for PodCertificateRequests
+const (
+	// UnsupportedKeyType should be set on "Denied" conditions when the signer
+	// doesn't support the key type of publicKey.
+	PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.34
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+type PodCertificateRequestList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata contains the list metadata.
+	//
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a collection of PodCertificateRequest objects
+	Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
index bff649e3c..d29f2d850 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
@@ -57,4 +57,56 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
 	return map_ClusterTrustBundleSpec
 }
 
+var map_PodCertificateRequest = map[string]string{
+	"":         "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
+	"metadata": "metadata contains the object metadata.",
+	"spec":     "spec contains the details about the certificate being requested.",
+	"status":   "status contains the issued certificate, and a standard set of conditions.",
+}
+
+func (PodCertificateRequest) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequest
+}
+
+var map_PodCertificateRequestList = map[string]string{
+	"":         "PodCertificateRequestList is a collection of PodCertificateRequest objects",
+	"metadata": "metadata contains the list metadata.",
+	"items":    "items is a collection of PodCertificateRequest objects",
+}
+
+func (PodCertificateRequestList) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequestList
+}
+
+var map_PodCertificateRequestSpec = map[string]string{
+	"":                     "PodCertificateRequestSpec describes the certificate request.  All fields are immutable after creation.",
+	"signerName":           "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project.  There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver.  It is currently unimplemented.",
+	"podName":              "podName is the name of the pod into which the certificate will be mounted.",
+	"podUID":               "podUID is the UID of the pod into which the certificate will be mounted.",
+	"serviceAccountName":   "serviceAccountName is the name of the service account the pod is running as.",
+	"serviceAccountUID":    "serviceAccountUID is the UID of the service account the pod is running as.",
+	"nodeName":             "nodeName is the name of the node the pod is assigned to.",
+	"nodeUID":              "nodeUID is the UID of the node the pod is assigned to.",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour).  The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour).  This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+	"pkixPublicKey":        "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet.  If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
+	"proofOfPossession":    "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
+}
+
+func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequestSpec
+}
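The proofOfPossession description above amounts to signing the ASCII bytes of the pod UID with the private key being certified, alongside a PKIX serialization of the matching public key. A sketch of the ED25519 case using the Go standard library (the helper name buildEd25519Request is assumed, not part of this patch):

package podcertexample

import (
	"crypto/ed25519"
	"crypto/x509"

	"k8s.io/apimachinery/pkg/types"
)

// buildEd25519Request serializes the public key in PKIX form and signs the
// ASCII bytes of the pod UID, matching the proofOfPossession description
// for ED25519 keys.
func buildEd25519Request(priv ed25519.PrivateKey, podUID types.UID) (pkixPublicKey, proof []byte, err error) {
	pkixPublicKey, err = x509.MarshalPKIXPublicKey(priv.Public())
	if err != nil {
		return nil, nil, err
	}
	proof = ed25519.Sign(priv, []byte(podUID))
	return pkixPublicKey, proof, nil
}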
+
+var map_PodCertificateRequestStatus = map[string]string{
+	"":                 "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
+	"conditions":       "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling.  At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
+	"certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n    described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes.  When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
+	"notBefore":        "notBefore is the time at which the certificate becomes valid.  The value must be the same as the notBefore value in the leaf certificate in certificateChain.  This field is set via the /status subresource.  Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
+	"beginRefreshAt":   "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate.  This field is set via the /status subresource, and must be set at the same time as certificateChain.  Once populated, this field is immutable.\n\nThis field is only a hint.  Kubelet may start refreshing before or after this time if necessary.",
+	"notAfter":         "notAfter is the time at which the certificate expires.  The value must be the same as the notAfter value in the leaf certificate in certificateChain.  This field is set via the /status subresource.  Once populated, it is immutable.  The signer must set this field at the same time it sets certificateChain.",
+}
+
+func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
+	return map_PodCertificateRequestStatus
+}
+
 // AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
index 30a4dc1e8..25bc0ed6c 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -100,3 +101,130 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
+func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCertificateRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodCertificateRequest, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
+func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCertificateRequestList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
+	*out = *in
+	if in.MaxExpirationSeconds != nil {
+		in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	if in.PKIXPublicKey != nil {
+		in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ProofOfPossession != nil {
+		in, out := &in.ProofOfPossession, &out.ProofOfPossession
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
+func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCertificateRequestSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NotBefore != nil {
+		in, out := &in.NotBefore, &out.NotBefore
+		*out = (*in).DeepCopy()
+	}
+	if in.BeginRefreshAt != nil {
+		in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
+		*out = (*in).DeepCopy()
+	}
+	if in.NotAfter != nil {
+		in, out := &in.NotAfter, &out.NotAfter
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
+func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCertificateRequestStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
index 3121a87d0..edbfce79b 100644
--- a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
@@ -56,3 +56,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
 func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
 	return 1, 37
 }
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 34
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
+	return 1, 37
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
+	return 1, 40
+}
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index 7c48270f6..4c9385c19 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -30,6 +30,8 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
 option go_package = "k8s.io/api/certificates/v1beta1";
 
 // Describes a certificate signing request
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
 message CertificateSigningRequest {
   // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -182,6 +184,11 @@ message CertificateSigningRequestStatus {
   // +listType=map
   // +listMapKey=type
   // +optional
+  // +k8s:listType=map
+  // +k8s:listMapKey=type
+  // +k8s:optional
+  // +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+  // +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
   repeated CertificateSigningRequestCondition conditions = 1;
 
   // If request was approved, the controller will place the issued certificate here.
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
index 1ce104807..fadb7e082 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -31,6 +31,8 @@ import (
 // +k8s:prerelease-lifecycle-gen:replacement=certificates.k8s.io,v1,CertificateSigningRequest
 
 // Describes a certificate signing request
+// +k8s:supportsSubresource=/status
+// +k8s:supportsSubresource=/approval
 type CertificateSigningRequest struct {
 	metav1.TypeMeta `json:",inline"`
 	// +optional
@@ -175,6 +177,11 @@ type CertificateSigningRequestStatus struct {
 	// +listType=map
 	// +listMapKey=type
 	// +optional
+	// +k8s:listType=map
+	// +k8s:listMapKey=type
+	// +k8s:optional
+	// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
+	// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
 	Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
 
 	// If request was approved, the controller will place the issued certificate here.
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index a4b8f5842..e1a297b98 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -861,10 +861,38 @@ func (m *Container) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_Container proto.InternalMessageInfo
 
+func (m *ContainerExtendedResourceRequest) Reset()      { *m = ContainerExtendedResourceRequest{} }
+func (*ContainerExtendedResourceRequest) ProtoMessage() {}
+func (*ContainerExtendedResourceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{29}
+}
+func (m *ContainerExtendedResourceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerExtendedResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerExtendedResourceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerExtendedResourceRequest.Merge(m, src)
+}
+func (m *ContainerExtendedResourceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerExtendedResourceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerExtendedResourceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerExtendedResourceRequest proto.InternalMessageInfo
+
 func (m *ContainerImage) Reset()      { *m = ContainerImage{} }
 func (*ContainerImage) ProtoMessage() {}
 func (*ContainerImage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{29}
+	return fileDescriptor_6c07b07c062484ab, []int{30}
 }
 func (m *ContainerImage) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -892,7 +920,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo
 func (m *ContainerPort) Reset()      { *m = ContainerPort{} }
 func (*ContainerPort) ProtoMessage() {}
 func (*ContainerPort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{30}
+	return fileDescriptor_6c07b07c062484ab, []int{31}
 }
 func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -920,7 +948,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
 func (m *ContainerResizePolicy) Reset()      { *m = ContainerResizePolicy{} }
 func (*ContainerResizePolicy) ProtoMessage() {}
 func (*ContainerResizePolicy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{31}
+	return fileDescriptor_6c07b07c062484ab, []int{32}
 }
 func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -945,10 +973,66 @@ func (m *ContainerResizePolicy) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo
 
+func (m *ContainerRestartRule) Reset()      { *m = ContainerRestartRule{} }
+func (*ContainerRestartRule) ProtoMessage() {}
+func (*ContainerRestartRule) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{33}
+}
+func (m *ContainerRestartRule) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerRestartRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerRestartRule) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerRestartRule.Merge(m, src)
+}
+func (m *ContainerRestartRule) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerRestartRule) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerRestartRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerRestartRule proto.InternalMessageInfo
+
+func (m *ContainerRestartRuleOnExitCodes) Reset()      { *m = ContainerRestartRuleOnExitCodes{} }
+func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {}
+func (*ContainerRestartRuleOnExitCodes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{34}
+}
+func (m *ContainerRestartRuleOnExitCodes) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerRestartRuleOnExitCodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerRestartRuleOnExitCodes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerRestartRuleOnExitCodes.Merge(m, src)
+}
+func (m *ContainerRestartRuleOnExitCodes) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerRestartRuleOnExitCodes) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerRestartRuleOnExitCodes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerRestartRuleOnExitCodes proto.InternalMessageInfo
+
 func (m *ContainerState) Reset()      { *m = ContainerState{} }
 func (*ContainerState) ProtoMessage() {}
 func (*ContainerState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{32}
+	return fileDescriptor_6c07b07c062484ab, []int{35}
 }
 func (m *ContainerState) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -976,7 +1060,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo
 func (m *ContainerStateRunning) Reset()      { *m = ContainerStateRunning{} }
 func (*ContainerStateRunning) ProtoMessage() {}
 func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{33}
+	return fileDescriptor_6c07b07c062484ab, []int{36}
 }
 func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1004,7 +1088,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
 func (m *ContainerStateTerminated) Reset()      { *m = ContainerStateTerminated{} }
 func (*ContainerStateTerminated) ProtoMessage() {}
 func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{34}
+	return fileDescriptor_6c07b07c062484ab, []int{37}
 }
 func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1032,7 +1116,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo
 func (m *ContainerStateWaiting) Reset()      { *m = ContainerStateWaiting{} }
 func (*ContainerStateWaiting) ProtoMessage() {}
 func (*ContainerStateWaiting) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{35}
+	return fileDescriptor_6c07b07c062484ab, []int{38}
 }
 func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1060,7 +1144,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo
 func (m *ContainerStatus) Reset()      { *m = ContainerStatus{} }
 func (*ContainerStatus) ProtoMessage() {}
 func (*ContainerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{36}
+	return fileDescriptor_6c07b07c062484ab, []int{39}
 }
 func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1088,7 +1172,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
 func (m *ContainerUser) Reset()      { *m = ContainerUser{} }
 func (*ContainerUser) ProtoMessage() {}
 func (*ContainerUser) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{37}
+	return fileDescriptor_6c07b07c062484ab, []int{40}
 }
 func (m *ContainerUser) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1116,7 +1200,7 @@ var xxx_messageInfo_ContainerUser proto.InternalMessageInfo
 func (m *DaemonEndpoint) Reset()      { *m = DaemonEndpoint{} }
 func (*DaemonEndpoint) ProtoMessage() {}
 func (*DaemonEndpoint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{38}
+	return fileDescriptor_6c07b07c062484ab, []int{41}
 }
 func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1144,7 +1228,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo
 func (m *DownwardAPIProjection) Reset()      { *m = DownwardAPIProjection{} }
 func (*DownwardAPIProjection) ProtoMessage() {}
 func (*DownwardAPIProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{39}
+	return fileDescriptor_6c07b07c062484ab, []int{42}
 }
 func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1172,7 +1256,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo
 func (m *DownwardAPIVolumeFile) Reset()      { *m = DownwardAPIVolumeFile{} }
 func (*DownwardAPIVolumeFile) ProtoMessage() {}
 func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{40}
+	return fileDescriptor_6c07b07c062484ab, []int{43}
 }
 func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1200,7 +1284,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo
 func (m *DownwardAPIVolumeSource) Reset()      { *m = DownwardAPIVolumeSource{} }
 func (*DownwardAPIVolumeSource) ProtoMessage() {}
 func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{41}
+	return fileDescriptor_6c07b07c062484ab, []int{44}
 }
 func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1228,7 +1312,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo
 func (m *EmptyDirVolumeSource) Reset()      { *m = EmptyDirVolumeSource{} }
 func (*EmptyDirVolumeSource) ProtoMessage() {}
 func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{42}
+	return fileDescriptor_6c07b07c062484ab, []int{45}
 }
 func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1256,7 +1340,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo
 func (m *EndpointAddress) Reset()      { *m = EndpointAddress{} }
 func (*EndpointAddress) ProtoMessage() {}
 func (*EndpointAddress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{43}
+	return fileDescriptor_6c07b07c062484ab, []int{46}
 }
 func (m *EndpointAddress) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1284,7 +1368,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo
 func (m *EndpointPort) Reset()      { *m = EndpointPort{} }
 func (*EndpointPort) ProtoMessage() {}
 func (*EndpointPort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{44}
+	return fileDescriptor_6c07b07c062484ab, []int{47}
 }
 func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1312,7 +1396,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
 func (m *EndpointSubset) Reset()      { *m = EndpointSubset{} }
 func (*EndpointSubset) ProtoMessage() {}
 func (*EndpointSubset) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{45}
+	return fileDescriptor_6c07b07c062484ab, []int{48}
 }
 func (m *EndpointSubset) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1340,7 +1424,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo
 func (m *Endpoints) Reset()      { *m = Endpoints{} }
 func (*Endpoints) ProtoMessage() {}
 func (*Endpoints) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{46}
+	return fileDescriptor_6c07b07c062484ab, []int{49}
 }
 func (m *Endpoints) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1368,7 +1452,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo
 func (m *EndpointsList) Reset()      { *m = EndpointsList{} }
 func (*EndpointsList) ProtoMessage() {}
 func (*EndpointsList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{47}
+	return fileDescriptor_6c07b07c062484ab, []int{50}
 }
 func (m *EndpointsList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1396,7 +1480,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo
 func (m *EnvFromSource) Reset()      { *m = EnvFromSource{} }
 func (*EnvFromSource) ProtoMessage() {}
 func (*EnvFromSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{48}
+	return fileDescriptor_6c07b07c062484ab, []int{51}
 }
 func (m *EnvFromSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1424,7 +1508,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo
 func (m *EnvVar) Reset()      { *m = EnvVar{} }
 func (*EnvVar) ProtoMessage() {}
 func (*EnvVar) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{49}
+	return fileDescriptor_6c07b07c062484ab, []int{52}
 }
 func (m *EnvVar) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1452,7 +1536,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo
 func (m *EnvVarSource) Reset()      { *m = EnvVarSource{} }
 func (*EnvVarSource) ProtoMessage() {}
 func (*EnvVarSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{50}
+	return fileDescriptor_6c07b07c062484ab, []int{53}
 }
 func (m *EnvVarSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1480,7 +1564,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo
 func (m *EphemeralContainer) Reset()      { *m = EphemeralContainer{} }
 func (*EphemeralContainer) ProtoMessage() {}
 func (*EphemeralContainer) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{51}
+	return fileDescriptor_6c07b07c062484ab, []int{54}
 }
 func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1508,7 +1592,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo
 func (m *EphemeralContainerCommon) Reset()      { *m = EphemeralContainerCommon{} }
 func (*EphemeralContainerCommon) ProtoMessage() {}
 func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{52}
+	return fileDescriptor_6c07b07c062484ab, []int{55}
 }
 func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1536,7 +1620,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo
 func (m *EphemeralVolumeSource) Reset()      { *m = EphemeralVolumeSource{} }
 func (*EphemeralVolumeSource) ProtoMessage() {}
 func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{53}
+	return fileDescriptor_6c07b07c062484ab, []int{56}
 }
 func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1564,7 +1648,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo
 func (m *Event) Reset()      { *m = Event{} }
 func (*Event) ProtoMessage() {}
 func (*Event) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{54}
+	return fileDescriptor_6c07b07c062484ab, []int{57}
 }
 func (m *Event) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1592,7 +1676,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo
 func (m *EventList) Reset()      { *m = EventList{} }
 func (*EventList) ProtoMessage() {}
 func (*EventList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{55}
+	return fileDescriptor_6c07b07c062484ab, []int{58}
 }
 func (m *EventList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1620,7 +1704,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo
 func (m *EventSeries) Reset()      { *m = EventSeries{} }
 func (*EventSeries) ProtoMessage() {}
 func (*EventSeries) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{56}
+	return fileDescriptor_6c07b07c062484ab, []int{59}
 }
 func (m *EventSeries) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1648,7 +1732,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo
 func (m *EventSource) Reset()      { *m = EventSource{} }
 func (*EventSource) ProtoMessage() {}
 func (*EventSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{57}
+	return fileDescriptor_6c07b07c062484ab, []int{60}
 }
 func (m *EventSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1676,7 +1760,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo
 func (m *ExecAction) Reset()      { *m = ExecAction{} }
 func (*ExecAction) ProtoMessage() {}
 func (*ExecAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{58}
+	return fileDescriptor_6c07b07c062484ab, []int{61}
 }
 func (m *ExecAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1704,7 +1788,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo
 func (m *FCVolumeSource) Reset()      { *m = FCVolumeSource{} }
 func (*FCVolumeSource) ProtoMessage() {}
 func (*FCVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{59}
+	return fileDescriptor_6c07b07c062484ab, []int{62}
 }
 func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1729,10 +1813,38 @@ func (m *FCVolumeSource) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
 
+func (m *FileKeySelector) Reset()      { *m = FileKeySelector{} }
+func (*FileKeySelector) ProtoMessage() {}
+func (*FileKeySelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{63}
+}
+func (m *FileKeySelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FileKeySelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileKeySelector.Merge(m, src)
+}
+func (m *FileKeySelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *FileKeySelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileKeySelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo
+
 func (m *FlexPersistentVolumeSource) Reset()      { *m = FlexPersistentVolumeSource{} }
 func (*FlexPersistentVolumeSource) ProtoMessage() {}
 func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{60}
+	return fileDescriptor_6c07b07c062484ab, []int{64}
 }
 func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1760,7 +1872,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
 func (m *FlexVolumeSource) Reset()      { *m = FlexVolumeSource{} }
 func (*FlexVolumeSource) ProtoMessage() {}
 func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{61}
+	return fileDescriptor_6c07b07c062484ab, []int{65}
 }
 func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1788,7 +1900,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
 func (m *FlockerVolumeSource) Reset()      { *m = FlockerVolumeSource{} }
 func (*FlockerVolumeSource) ProtoMessage() {}
 func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{62}
+	return fileDescriptor_6c07b07c062484ab, []int{66}
 }
 func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1816,7 +1928,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
 func (m *GCEPersistentDiskVolumeSource) Reset()      { *m = GCEPersistentDiskVolumeSource{} }
 func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
 func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{63}
+	return fileDescriptor_6c07b07c062484ab, []int{67}
 }
 func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1844,7 +1956,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
 func (m *GRPCAction) Reset()      { *m = GRPCAction{} }
 func (*GRPCAction) ProtoMessage() {}
 func (*GRPCAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{64}
+	return fileDescriptor_6c07b07c062484ab, []int{68}
 }
 func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1872,7 +1984,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
 func (m *GitRepoVolumeSource) Reset()      { *m = GitRepoVolumeSource{} }
 func (*GitRepoVolumeSource) ProtoMessage() {}
 func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{65}
+	return fileDescriptor_6c07b07c062484ab, []int{69}
 }
 func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1900,7 +2012,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
 func (m *GlusterfsPersistentVolumeSource) Reset()      { *m = GlusterfsPersistentVolumeSource{} }
 func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
 func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{66}
+	return fileDescriptor_6c07b07c062484ab, []int{70}
 }
 func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1928,7 +2040,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
 func (m *GlusterfsVolumeSource) Reset()      { *m = GlusterfsVolumeSource{} }
 func (*GlusterfsVolumeSource) ProtoMessage() {}
 func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{67}
+	return fileDescriptor_6c07b07c062484ab, []int{71}
 }
 func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1956,7 +2068,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
 func (m *HTTPGetAction) Reset()      { *m = HTTPGetAction{} }
 func (*HTTPGetAction) ProtoMessage() {}
 func (*HTTPGetAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{68}
+	return fileDescriptor_6c07b07c062484ab, []int{72}
 }
 func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1984,7 +2096,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
 func (m *HTTPHeader) Reset()      { *m = HTTPHeader{} }
 func (*HTTPHeader) ProtoMessage() {}
 func (*HTTPHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{69}
+	return fileDescriptor_6c07b07c062484ab, []int{73}
 }
 func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2012,7 +2124,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
 func (m *HostAlias) Reset()      { *m = HostAlias{} }
 func (*HostAlias) ProtoMessage() {}
 func (*HostAlias) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{70}
+	return fileDescriptor_6c07b07c062484ab, []int{74}
 }
 func (m *HostAlias) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2040,7 +2152,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo
 func (m *HostIP) Reset()      { *m = HostIP{} }
 func (*HostIP) ProtoMessage() {}
 func (*HostIP) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{71}
+	return fileDescriptor_6c07b07c062484ab, []int{75}
 }
 func (m *HostIP) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2068,7 +2180,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo
 func (m *HostPathVolumeSource) Reset()      { *m = HostPathVolumeSource{} }
 func (*HostPathVolumeSource) ProtoMessage() {}
 func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{72}
+	return fileDescriptor_6c07b07c062484ab, []int{76}
 }
 func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2096,7 +2208,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
 func (m *ISCSIPersistentVolumeSource) Reset()      { *m = ISCSIPersistentVolumeSource{} }
 func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
 func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{73}
+	return fileDescriptor_6c07b07c062484ab, []int{77}
 }
 func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2124,7 +2236,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
 func (m *ISCSIVolumeSource) Reset()      { *m = ISCSIVolumeSource{} }
 func (*ISCSIVolumeSource) ProtoMessage() {}
 func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{74}
+	return fileDescriptor_6c07b07c062484ab, []int{78}
 }
 func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2152,7 +2264,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
 func (m *ImageVolumeSource) Reset()      { *m = ImageVolumeSource{} }
 func (*ImageVolumeSource) ProtoMessage() {}
 func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{75}
+	return fileDescriptor_6c07b07c062484ab, []int{79}
 }
 func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2180,7 +2292,7 @@ var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
 func (m *KeyToPath) Reset()      { *m = KeyToPath{} }
 func (*KeyToPath) ProtoMessage() {}
 func (*KeyToPath) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{76}
+	return fileDescriptor_6c07b07c062484ab, []int{80}
 }
 func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2208,7 +2320,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
 func (m *Lifecycle) Reset()      { *m = Lifecycle{} }
 func (*Lifecycle) ProtoMessage() {}
 func (*Lifecycle) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{77}
+	return fileDescriptor_6c07b07c062484ab, []int{81}
 }
 func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2236,7 +2348,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
 func (m *LifecycleHandler) Reset()      { *m = LifecycleHandler{} }
 func (*LifecycleHandler) ProtoMessage() {}
 func (*LifecycleHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{78}
+	return fileDescriptor_6c07b07c062484ab, []int{82}
 }
 func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2264,7 +2376,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
 func (m *LimitRange) Reset()      { *m = LimitRange{} }
 func (*LimitRange) ProtoMessage() {}
 func (*LimitRange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{79}
+	return fileDescriptor_6c07b07c062484ab, []int{83}
 }
 func (m *LimitRange) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2292,7 +2404,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
 func (m *LimitRangeItem) Reset()      { *m = LimitRangeItem{} }
 func (*LimitRangeItem) ProtoMessage() {}
 func (*LimitRangeItem) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{80}
+	return fileDescriptor_6c07b07c062484ab, []int{84}
 }
 func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2320,7 +2432,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
 func (m *LimitRangeList) Reset()      { *m = LimitRangeList{} }
 func (*LimitRangeList) ProtoMessage() {}
 func (*LimitRangeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{81}
+	return fileDescriptor_6c07b07c062484ab, []int{85}
 }
 func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2348,7 +2460,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
 func (m *LimitRangeSpec) Reset()      { *m = LimitRangeSpec{} }
 func (*LimitRangeSpec) ProtoMessage() {}
 func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{82}
+	return fileDescriptor_6c07b07c062484ab, []int{86}
 }
 func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2376,7 +2488,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
 func (m *LinuxContainerUser) Reset()      { *m = LinuxContainerUser{} }
 func (*LinuxContainerUser) ProtoMessage() {}
 func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{83}
+	return fileDescriptor_6c07b07c062484ab, []int{87}
 }
 func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2404,7 +2516,7 @@ var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
 func (m *List) Reset()      { *m = List{} }
 func (*List) ProtoMessage() {}
 func (*List) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{84}
+	return fileDescriptor_6c07b07c062484ab, []int{88}
 }
 func (m *List) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2432,7 +2544,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
 func (m *LoadBalancerIngress) Reset()      { *m = LoadBalancerIngress{} }
 func (*LoadBalancerIngress) ProtoMessage() {}
 func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{85}
+	return fileDescriptor_6c07b07c062484ab, []int{89}
 }
 func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2460,7 +2572,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
 func (m *LoadBalancerStatus) Reset()      { *m = LoadBalancerStatus{} }
 func (*LoadBalancerStatus) ProtoMessage() {}
 func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{86}
+	return fileDescriptor_6c07b07c062484ab, []int{90}
 }
 func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2488,7 +2600,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
 func (m *LocalObjectReference) Reset()      { *m = LocalObjectReference{} }
 func (*LocalObjectReference) ProtoMessage() {}
 func (*LocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{87}
+	return fileDescriptor_6c07b07c062484ab, []int{91}
 }
 func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2516,7 +2628,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
 func (m *LocalVolumeSource) Reset()      { *m = LocalVolumeSource{} }
 func (*LocalVolumeSource) ProtoMessage() {}
 func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{88}
+	return fileDescriptor_6c07b07c062484ab, []int{92}
 }
 func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2544,7 +2656,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
 func (m *ModifyVolumeStatus) Reset()      { *m = ModifyVolumeStatus{} }
 func (*ModifyVolumeStatus) ProtoMessage() {}
 func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{89}
+	return fileDescriptor_6c07b07c062484ab, []int{93}
 }
 func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2572,7 +2684,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
 func (m *NFSVolumeSource) Reset()      { *m = NFSVolumeSource{} }
 func (*NFSVolumeSource) ProtoMessage() {}
 func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{90}
+	return fileDescriptor_6c07b07c062484ab, []int{94}
 }
 func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2600,7 +2712,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
 func (m *Namespace) Reset()      { *m = Namespace{} }
 func (*Namespace) ProtoMessage() {}
 func (*Namespace) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{91}
+	return fileDescriptor_6c07b07c062484ab, []int{95}
 }
 func (m *Namespace) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2628,7 +2740,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
 func (m *NamespaceCondition) Reset()      { *m = NamespaceCondition{} }
 func (*NamespaceCondition) ProtoMessage() {}
 func (*NamespaceCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{92}
+	return fileDescriptor_6c07b07c062484ab, []int{96}
 }
 func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2656,7 +2768,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
 func (m *NamespaceList) Reset()      { *m = NamespaceList{} }
 func (*NamespaceList) ProtoMessage() {}
 func (*NamespaceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{93}
+	return fileDescriptor_6c07b07c062484ab, []int{97}
 }
 func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2684,7 +2796,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
 func (m *NamespaceSpec) Reset()      { *m = NamespaceSpec{} }
 func (*NamespaceSpec) ProtoMessage() {}
 func (*NamespaceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{94}
+	return fileDescriptor_6c07b07c062484ab, []int{98}
 }
 func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2712,7 +2824,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
 func (m *NamespaceStatus) Reset()      { *m = NamespaceStatus{} }
 func (*NamespaceStatus) ProtoMessage() {}
 func (*NamespaceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{95}
+	return fileDescriptor_6c07b07c062484ab, []int{99}
 }
 func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2740,7 +2852,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
 func (m *Node) Reset()      { *m = Node{} }
 func (*Node) ProtoMessage() {}
 func (*Node) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{96}
+	return fileDescriptor_6c07b07c062484ab, []int{100}
 }
 func (m *Node) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2768,7 +2880,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
 func (m *NodeAddress) Reset()      { *m = NodeAddress{} }
 func (*NodeAddress) ProtoMessage() {}
 func (*NodeAddress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{97}
+	return fileDescriptor_6c07b07c062484ab, []int{101}
 }
 func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2796,7 +2908,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
 func (m *NodeAffinity) Reset()      { *m = NodeAffinity{} }
 func (*NodeAffinity) ProtoMessage() {}
 func (*NodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{98}
+	return fileDescriptor_6c07b07c062484ab, []int{102}
 }
 func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2824,7 +2936,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
 func (m *NodeCondition) Reset()      { *m = NodeCondition{} }
 func (*NodeCondition) ProtoMessage() {}
 func (*NodeCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{99}
+	return fileDescriptor_6c07b07c062484ab, []int{103}
 }
 func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2852,7 +2964,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
 func (m *NodeConfigSource) Reset()      { *m = NodeConfigSource{} }
 func (*NodeConfigSource) ProtoMessage() {}
 func (*NodeConfigSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{100}
+	return fileDescriptor_6c07b07c062484ab, []int{104}
 }
 func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2880,7 +2992,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
 func (m *NodeConfigStatus) Reset()      { *m = NodeConfigStatus{} }
 func (*NodeConfigStatus) ProtoMessage() {}
 func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{101}
+	return fileDescriptor_6c07b07c062484ab, []int{105}
 }
 func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2908,7 +3020,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
 func (m *NodeDaemonEndpoints) Reset()      { *m = NodeDaemonEndpoints{} }
 func (*NodeDaemonEndpoints) ProtoMessage() {}
 func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{102}
+	return fileDescriptor_6c07b07c062484ab, []int{106}
 }
 func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2936,7 +3048,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
 func (m *NodeFeatures) Reset()      { *m = NodeFeatures{} }
 func (*NodeFeatures) ProtoMessage() {}
 func (*NodeFeatures) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{103}
+	return fileDescriptor_6c07b07c062484ab, []int{107}
 }
 func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2964,7 +3076,7 @@ var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
 func (m *NodeList) Reset()      { *m = NodeList{} }
 func (*NodeList) ProtoMessage() {}
 func (*NodeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{104}
+	return fileDescriptor_6c07b07c062484ab, []int{108}
 }
 func (m *NodeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2992,7 +3104,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
 func (m *NodeProxyOptions) Reset()      { *m = NodeProxyOptions{} }
 func (*NodeProxyOptions) ProtoMessage() {}
 func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{105}
+	return fileDescriptor_6c07b07c062484ab, []int{109}
 }
 func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3020,7 +3132,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
 func (m *NodeRuntimeHandler) Reset()      { *m = NodeRuntimeHandler{} }
 func (*NodeRuntimeHandler) ProtoMessage() {}
 func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{106}
+	return fileDescriptor_6c07b07c062484ab, []int{110}
 }
 func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3048,7 +3160,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
 func (m *NodeRuntimeHandlerFeatures) Reset()      { *m = NodeRuntimeHandlerFeatures{} }
 func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
 func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{107}
+	return fileDescriptor_6c07b07c062484ab, []int{111}
 }
 func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3076,7 +3188,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
 func (m *NodeSelector) Reset()      { *m = NodeSelector{} }
 func (*NodeSelector) ProtoMessage() {}
 func (*NodeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{108}
+	return fileDescriptor_6c07b07c062484ab, []int{112}
 }
 func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3104,7 +3216,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
 func (m *NodeSelectorRequirement) Reset()      { *m = NodeSelectorRequirement{} }
 func (*NodeSelectorRequirement) ProtoMessage() {}
 func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{109}
+	return fileDescriptor_6c07b07c062484ab, []int{113}
 }
 func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3132,7 +3244,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
 func (m *NodeSelectorTerm) Reset()      { *m = NodeSelectorTerm{} }
 func (*NodeSelectorTerm) ProtoMessage() {}
 func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{110}
+	return fileDescriptor_6c07b07c062484ab, []int{114}
 }
 func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3160,7 +3272,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
 func (m *NodeSpec) Reset()      { *m = NodeSpec{} }
 func (*NodeSpec) ProtoMessage() {}
 func (*NodeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{111}
+	return fileDescriptor_6c07b07c062484ab, []int{115}
 }
 func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3188,7 +3300,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
 func (m *NodeStatus) Reset()      { *m = NodeStatus{} }
 func (*NodeStatus) ProtoMessage() {}
 func (*NodeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{112}
+	return fileDescriptor_6c07b07c062484ab, []int{116}
 }
 func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3216,7 +3328,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
 func (m *NodeSwapStatus) Reset()      { *m = NodeSwapStatus{} }
 func (*NodeSwapStatus) ProtoMessage() {}
 func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{113}
+	return fileDescriptor_6c07b07c062484ab, []int{117}
 }
 func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3244,7 +3356,7 @@ var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
 func (m *NodeSystemInfo) Reset()      { *m = NodeSystemInfo{} }
 func (*NodeSystemInfo) ProtoMessage() {}
 func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{114}
+	return fileDescriptor_6c07b07c062484ab, []int{118}
 }
 func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3272,7 +3384,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
 func (m *ObjectFieldSelector) Reset()      { *m = ObjectFieldSelector{} }
 func (*ObjectFieldSelector) ProtoMessage() {}
 func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{115}
+	return fileDescriptor_6c07b07c062484ab, []int{119}
 }
 func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3300,7 +3412,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
 func (m *ObjectReference) Reset()      { *m = ObjectReference{} }
 func (*ObjectReference) ProtoMessage() {}
 func (*ObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{116}
+	return fileDescriptor_6c07b07c062484ab, []int{120}
 }
 func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3328,7 +3440,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
 func (m *PersistentVolume) Reset()      { *m = PersistentVolume{} }
 func (*PersistentVolume) ProtoMessage() {}
 func (*PersistentVolume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{117}
+	return fileDescriptor_6c07b07c062484ab, []int{121}
 }
 func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3356,7 +3468,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
 func (m *PersistentVolumeClaim) Reset()      { *m = PersistentVolumeClaim{} }
 func (*PersistentVolumeClaim) ProtoMessage() {}
 func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{118}
+	return fileDescriptor_6c07b07c062484ab, []int{122}
 }
 func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3384,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
 func (m *PersistentVolumeClaimCondition) Reset()      { *m = PersistentVolumeClaimCondition{} }
 func (*PersistentVolumeClaimCondition) ProtoMessage() {}
 func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{119}
+	return fileDescriptor_6c07b07c062484ab, []int{123}
 }
 func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3412,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
 func (m *PersistentVolumeClaimList) Reset()      { *m = PersistentVolumeClaimList{} }
 func (*PersistentVolumeClaimList) ProtoMessage() {}
 func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{120}
+	return fileDescriptor_6c07b07c062484ab, []int{124}
 }
 func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3440,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
 func (m *PersistentVolumeClaimSpec) Reset()      { *m = PersistentVolumeClaimSpec{} }
 func (*PersistentVolumeClaimSpec) ProtoMessage() {}
 func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{121}
+	return fileDescriptor_6c07b07c062484ab, []int{125}
 }
 func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3468,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
 func (m *PersistentVolumeClaimStatus) Reset()      { *m = PersistentVolumeClaimStatus{} }
 func (*PersistentVolumeClaimStatus) ProtoMessage() {}
 func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{122}
+	return fileDescriptor_6c07b07c062484ab, []int{126}
 }
 func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3496,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
 func (m *PersistentVolumeClaimTemplate) Reset()      { *m = PersistentVolumeClaimTemplate{} }
 func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
 func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{123}
+	return fileDescriptor_6c07b07c062484ab, []int{127}
 }
 func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3524,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
 func (m *PersistentVolumeClaimVolumeSource) Reset()      { *m = PersistentVolumeClaimVolumeSource{} }
 func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
 func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{124}
+	return fileDescriptor_6c07b07c062484ab, []int{128}
 }
 func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3552,7 +3664,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
 func (m *PersistentVolumeList) Reset()      { *m = PersistentVolumeList{} }
 func (*PersistentVolumeList) ProtoMessage() {}
 func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{125}
+	return fileDescriptor_6c07b07c062484ab, []int{129}
 }
 func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3580,7 +3692,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
 func (m *PersistentVolumeSource) Reset()      { *m = PersistentVolumeSource{} }
 func (*PersistentVolumeSource) ProtoMessage() {}
 func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{126}
+	return fileDescriptor_6c07b07c062484ab, []int{130}
 }
 func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3608,7 +3720,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
 func (m *PersistentVolumeSpec) Reset()      { *m = PersistentVolumeSpec{} }
 func (*PersistentVolumeSpec) ProtoMessage() {}
 func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{127}
+	return fileDescriptor_6c07b07c062484ab, []int{131}
 }
 func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3636,7 +3748,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
 func (m *PersistentVolumeStatus) Reset()      { *m = PersistentVolumeStatus{} }
 func (*PersistentVolumeStatus) ProtoMessage() {}
 func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{128}
+	return fileDescriptor_6c07b07c062484ab, []int{132}
 }
 func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3664,7 +3776,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
 func (m *PhotonPersistentDiskVolumeSource) Reset()      { *m = PhotonPersistentDiskVolumeSource{} }
 func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
 func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{129}
+	return fileDescriptor_6c07b07c062484ab, []int{133}
 }
 func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3692,7 +3804,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
 func (m *Pod) Reset()      { *m = Pod{} }
 func (*Pod) ProtoMessage() {}
 func (*Pod) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{130}
+	return fileDescriptor_6c07b07c062484ab, []int{134}
 }
 func (m *Pod) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3720,7 +3832,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
 func (m *PodAffinity) Reset()      { *m = PodAffinity{} }
 func (*PodAffinity) ProtoMessage() {}
 func (*PodAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{131}
+	return fileDescriptor_6c07b07c062484ab, []int{135}
 }
 func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3748,7 +3860,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
 func (m *PodAffinityTerm) Reset()      { *m = PodAffinityTerm{} }
 func (*PodAffinityTerm) ProtoMessage() {}
 func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{132}
+	return fileDescriptor_6c07b07c062484ab, []int{136}
 }
 func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3776,7 +3888,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
 func (m *PodAntiAffinity) Reset()      { *m = PodAntiAffinity{} }
 func (*PodAntiAffinity) ProtoMessage() {}
 func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{133}
+	return fileDescriptor_6c07b07c062484ab, []int{137}
 }
 func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3804,7 +3916,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
 func (m *PodAttachOptions) Reset()      { *m = PodAttachOptions{} }
 func (*PodAttachOptions) ProtoMessage() {}
 func (*PodAttachOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{134}
+	return fileDescriptor_6c07b07c062484ab, []int{138}
 }
 func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3829,10 +3941,38 @@ func (m *PodAttachOptions) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
 
+func (m *PodCertificateProjection) Reset()      { *m = PodCertificateProjection{} }
+func (*PodCertificateProjection) ProtoMessage() {}
+func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{139}
+}
+func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCertificateProjection.Merge(m, src)
+}
+func (m *PodCertificateProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCertificateProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
+
 func (m *PodCondition) Reset()      { *m = PodCondition{} }
 func (*PodCondition) ProtoMessage() {}
 func (*PodCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{135}
+	return fileDescriptor_6c07b07c062484ab, []int{140}
 }
 func (m *PodCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3860,7 +4000,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
 func (m *PodDNSConfig) Reset()      { *m = PodDNSConfig{} }
 func (*PodDNSConfig) ProtoMessage() {}
 func (*PodDNSConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{136}
+	return fileDescriptor_6c07b07c062484ab, []int{141}
 }
 func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3888,7 +4028,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
 func (m *PodDNSConfigOption) Reset()      { *m = PodDNSConfigOption{} }
 func (*PodDNSConfigOption) ProtoMessage() {}
 func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{137}
+	return fileDescriptor_6c07b07c062484ab, []int{142}
 }
 func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3916,7 +4056,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
 func (m *PodExecOptions) Reset()      { *m = PodExecOptions{} }
 func (*PodExecOptions) ProtoMessage() {}
 func (*PodExecOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{138}
+	return fileDescriptor_6c07b07c062484ab, []int{143}
 }
 func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3941,10 +4081,38 @@ func (m *PodExecOptions) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
 
+func (m *PodExtendedResourceClaimStatus) Reset()      { *m = PodExtendedResourceClaimStatus{} }
+func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
+func (*PodExtendedResourceClaimStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{144}
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodExtendedResourceClaimStatus.Merge(m, src)
+}
+func (m *PodExtendedResourceClaimStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodExtendedResourceClaimStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodExtendedResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodExtendedResourceClaimStatus proto.InternalMessageInfo
+
 func (m *PodIP) Reset()      { *m = PodIP{} }
 func (*PodIP) ProtoMessage() {}
 func (*PodIP) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{139}
+	return fileDescriptor_6c07b07c062484ab, []int{145}
 }
 func (m *PodIP) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3972,7 +4140,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
 func (m *PodList) Reset()      { *m = PodList{} }
 func (*PodList) ProtoMessage() {}
 func (*PodList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{140}
+	return fileDescriptor_6c07b07c062484ab, []int{146}
 }
 func (m *PodList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4000,7 +4168,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
 func (m *PodLogOptions) Reset()      { *m = PodLogOptions{} }
 func (*PodLogOptions) ProtoMessage() {}
 func (*PodLogOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{141}
+	return fileDescriptor_6c07b07c062484ab, []int{147}
 }
 func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4028,7 +4196,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
 func (m *PodOS) Reset()      { *m = PodOS{} }
 func (*PodOS) ProtoMessage() {}
 func (*PodOS) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{142}
+	return fileDescriptor_6c07b07c062484ab, []int{148}
 }
 func (m *PodOS) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4056,7 +4224,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
 func (m *PodPortForwardOptions) Reset()      { *m = PodPortForwardOptions{} }
 func (*PodPortForwardOptions) ProtoMessage() {}
 func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{143}
+	return fileDescriptor_6c07b07c062484ab, []int{149}
 }
 func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4084,7 +4252,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
 func (m *PodProxyOptions) Reset()      { *m = PodProxyOptions{} }
 func (*PodProxyOptions) ProtoMessage() {}
 func (*PodProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{144}
+	return fileDescriptor_6c07b07c062484ab, []int{150}
 }
 func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4112,7 +4280,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
 func (m *PodReadinessGate) Reset()      { *m = PodReadinessGate{} }
 func (*PodReadinessGate) ProtoMessage() {}
 func (*PodReadinessGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{145}
+	return fileDescriptor_6c07b07c062484ab, []int{151}
 }
 func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4140,7 +4308,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
 func (m *PodResourceClaim) Reset()      { *m = PodResourceClaim{} }
 func (*PodResourceClaim) ProtoMessage() {}
 func (*PodResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{146}
+	return fileDescriptor_6c07b07c062484ab, []int{152}
 }
 func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4168,7 +4336,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
 func (m *PodResourceClaimStatus) Reset()      { *m = PodResourceClaimStatus{} }
 func (*PodResourceClaimStatus) ProtoMessage() {}
 func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{147}
+	return fileDescriptor_6c07b07c062484ab, []int{153}
 }
 func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4196,7 +4364,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
 func (m *PodSchedulingGate) Reset()      { *m = PodSchedulingGate{} }
 func (*PodSchedulingGate) ProtoMessage() {}
 func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{148}
+	return fileDescriptor_6c07b07c062484ab, []int{154}
 }
 func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4224,7 +4392,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
 func (m *PodSecurityContext) Reset()      { *m = PodSecurityContext{} }
 func (*PodSecurityContext) ProtoMessage() {}
 func (*PodSecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{149}
+	return fileDescriptor_6c07b07c062484ab, []int{155}
 }
 func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4252,7 +4420,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
 func (m *PodSignature) Reset()      { *m = PodSignature{} }
 func (*PodSignature) ProtoMessage() {}
 func (*PodSignature) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{150}
+	return fileDescriptor_6c07b07c062484ab, []int{156}
 }
 func (m *PodSignature) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4280,7 +4448,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
 func (m *PodSpec) Reset()      { *m = PodSpec{} }
 func (*PodSpec) ProtoMessage() {}
 func (*PodSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{151}
+	return fileDescriptor_6c07b07c062484ab, []int{157}
 }
 func (m *PodSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4308,7 +4476,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
 func (m *PodStatus) Reset()      { *m = PodStatus{} }
 func (*PodStatus) ProtoMessage() {}
 func (*PodStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{152}
+	return fileDescriptor_6c07b07c062484ab, []int{158}
 }
 func (m *PodStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4336,7 +4504,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
 func (m *PodStatusResult) Reset()      { *m = PodStatusResult{} }
 func (*PodStatusResult) ProtoMessage() {}
 func (*PodStatusResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{153}
+	return fileDescriptor_6c07b07c062484ab, []int{159}
 }
 func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4364,7 +4532,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
 func (m *PodTemplate) Reset()      { *m = PodTemplate{} }
 func (*PodTemplate) ProtoMessage() {}
 func (*PodTemplate) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{154}
+	return fileDescriptor_6c07b07c062484ab, []int{160}
 }
 func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4392,7 +4560,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
 func (m *PodTemplateList) Reset()      { *m = PodTemplateList{} }
 func (*PodTemplateList) ProtoMessage() {}
 func (*PodTemplateList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{155}
+	return fileDescriptor_6c07b07c062484ab, []int{161}
 }
 func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4420,7 +4588,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
 func (m *PodTemplateSpec) Reset()      { *m = PodTemplateSpec{} }
 func (*PodTemplateSpec) ProtoMessage() {}
 func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{156}
+	return fileDescriptor_6c07b07c062484ab, []int{162}
 }
 func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4448,7 +4616,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
 func (m *PortStatus) Reset()      { *m = PortStatus{} }
 func (*PortStatus) ProtoMessage() {}
 func (*PortStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{157}
+	return fileDescriptor_6c07b07c062484ab, []int{163}
 }
 func (m *PortStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4476,7 +4644,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
 func (m *PortworxVolumeSource) Reset()      { *m = PortworxVolumeSource{} }
 func (*PortworxVolumeSource) ProtoMessage() {}
 func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{158}
+	return fileDescriptor_6c07b07c062484ab, []int{164}
 }
 func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4504,7 +4672,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
 func (m *Preconditions) Reset()      { *m = Preconditions{} }
 func (*Preconditions) ProtoMessage() {}
 func (*Preconditions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{159}
+	return fileDescriptor_6c07b07c062484ab, []int{165}
 }
 func (m *Preconditions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4532,7 +4700,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
 func (m *PreferAvoidPodsEntry) Reset()      { *m = PreferAvoidPodsEntry{} }
 func (*PreferAvoidPodsEntry) ProtoMessage() {}
 func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{160}
+	return fileDescriptor_6c07b07c062484ab, []int{166}
 }
 func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4560,7 +4728,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
 func (m *PreferredSchedulingTerm) Reset()      { *m = PreferredSchedulingTerm{} }
 func (*PreferredSchedulingTerm) ProtoMessage() {}
 func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{161}
+	return fileDescriptor_6c07b07c062484ab, []int{167}
 }
 func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4588,7 +4756,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
 func (m *Probe) Reset()      { *m = Probe{} }
 func (*Probe) ProtoMessage() {}
 func (*Probe) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{162}
+	return fileDescriptor_6c07b07c062484ab, []int{168}
 }
 func (m *Probe) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4616,7 +4784,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
 func (m *ProbeHandler) Reset()      { *m = ProbeHandler{} }
 func (*ProbeHandler) ProtoMessage() {}
 func (*ProbeHandler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{163}
+	return fileDescriptor_6c07b07c062484ab, []int{169}
 }
 func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4644,7 +4812,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
 func (m *ProjectedVolumeSource) Reset()      { *m = ProjectedVolumeSource{} }
 func (*ProjectedVolumeSource) ProtoMessage() {}
 func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{164}
+	return fileDescriptor_6c07b07c062484ab, []int{170}
 }
 func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4672,7 +4840,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
 func (m *QuobyteVolumeSource) Reset()      { *m = QuobyteVolumeSource{} }
 func (*QuobyteVolumeSource) ProtoMessage() {}
 func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{165}
+	return fileDescriptor_6c07b07c062484ab, []int{171}
 }
 func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4700,7 +4868,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
 func (m *RBDPersistentVolumeSource) Reset()      { *m = RBDPersistentVolumeSource{} }
 func (*RBDPersistentVolumeSource) ProtoMessage() {}
 func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{166}
+	return fileDescriptor_6c07b07c062484ab, []int{172}
 }
 func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4728,7 +4896,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
 func (m *RBDVolumeSource) Reset()      { *m = RBDVolumeSource{} }
 func (*RBDVolumeSource) ProtoMessage() {}
 func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{167}
+	return fileDescriptor_6c07b07c062484ab, []int{173}
 }
 func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4756,7 +4924,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
 func (m *RangeAllocation) Reset()      { *m = RangeAllocation{} }
 func (*RangeAllocation) ProtoMessage() {}
 func (*RangeAllocation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{168}
+	return fileDescriptor_6c07b07c062484ab, []int{174}
 }
 func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4784,7 +4952,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
 func (m *ReplicationController) Reset()      { *m = ReplicationController{} }
 func (*ReplicationController) ProtoMessage() {}
 func (*ReplicationController) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{169}
+	return fileDescriptor_6c07b07c062484ab, []int{175}
 }
 func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4812,7 +4980,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
 func (m *ReplicationControllerCondition) Reset()      { *m = ReplicationControllerCondition{} }
 func (*ReplicationControllerCondition) ProtoMessage() {}
 func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{170}
+	return fileDescriptor_6c07b07c062484ab, []int{176}
 }
 func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4840,7 +5008,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
 func (m *ReplicationControllerList) Reset()      { *m = ReplicationControllerList{} }
 func (*ReplicationControllerList) ProtoMessage() {}
 func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{171}
+	return fileDescriptor_6c07b07c062484ab, []int{177}
 }
 func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4868,7 +5036,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
 func (m *ReplicationControllerSpec) Reset()      { *m = ReplicationControllerSpec{} }
 func (*ReplicationControllerSpec) ProtoMessage() {}
 func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{172}
+	return fileDescriptor_6c07b07c062484ab, []int{178}
 }
 func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4896,7 +5064,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
 func (m *ReplicationControllerStatus) Reset()      { *m = ReplicationControllerStatus{} }
 func (*ReplicationControllerStatus) ProtoMessage() {}
 func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{173}
+	return fileDescriptor_6c07b07c062484ab, []int{179}
 }
 func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4924,7 +5092,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
 func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
 func (*ResourceClaim) ProtoMessage() {}
 func (*ResourceClaim) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{174}
+	return fileDescriptor_6c07b07c062484ab, []int{180}
 }
 func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4952,7 +5120,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
 func (m *ResourceFieldSelector) Reset()      { *m = ResourceFieldSelector{} }
 func (*ResourceFieldSelector) ProtoMessage() {}
 func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{175}
+	return fileDescriptor_6c07b07c062484ab, []int{181}
 }
 func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -4980,7 +5148,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
 func (m *ResourceHealth) Reset()      { *m = ResourceHealth{} }
 func (*ResourceHealth) ProtoMessage() {}
 func (*ResourceHealth) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{176}
+	return fileDescriptor_6c07b07c062484ab, []int{182}
 }
 func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5008,7 +5176,7 @@ var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
 func (m *ResourceQuota) Reset()      { *m = ResourceQuota{} }
 func (*ResourceQuota) ProtoMessage() {}
 func (*ResourceQuota) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{177}
+	return fileDescriptor_6c07b07c062484ab, []int{183}
 }
 func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5036,7 +5204,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
 func (m *ResourceQuotaList) Reset()      { *m = ResourceQuotaList{} }
 func (*ResourceQuotaList) ProtoMessage() {}
 func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{178}
+	return fileDescriptor_6c07b07c062484ab, []int{184}
 }
 func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5064,7 +5232,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
 func (m *ResourceQuotaSpec) Reset()      { *m = ResourceQuotaSpec{} }
 func (*ResourceQuotaSpec) ProtoMessage() {}
 func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{179}
+	return fileDescriptor_6c07b07c062484ab, []int{185}
 }
 func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5092,7 +5260,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
 func (m *ResourceQuotaStatus) Reset()      { *m = ResourceQuotaStatus{} }
 func (*ResourceQuotaStatus) ProtoMessage() {}
 func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{180}
+	return fileDescriptor_6c07b07c062484ab, []int{186}
 }
 func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5120,7 +5288,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
 func (m *ResourceRequirements) Reset()      { *m = ResourceRequirements{} }
 func (*ResourceRequirements) ProtoMessage() {}
 func (*ResourceRequirements) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{181}
+	return fileDescriptor_6c07b07c062484ab, []int{187}
 }
 func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5148,7 +5316,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
 func (m *ResourceStatus) Reset()      { *m = ResourceStatus{} }
 func (*ResourceStatus) ProtoMessage() {}
 func (*ResourceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{182}
+	return fileDescriptor_6c07b07c062484ab, []int{188}
 }
 func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5176,7 +5344,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
 func (m *SELinuxOptions) Reset()      { *m = SELinuxOptions{} }
 func (*SELinuxOptions) ProtoMessage() {}
 func (*SELinuxOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{183}
+	return fileDescriptor_6c07b07c062484ab, []int{189}
 }
 func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5204,7 +5372,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
 func (m *ScaleIOPersistentVolumeSource) Reset()      { *m = ScaleIOPersistentVolumeSource{} }
 func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
 func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{184}
+	return fileDescriptor_6c07b07c062484ab, []int{190}
 }
 func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5232,7 +5400,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
 func (m *ScaleIOVolumeSource) Reset()      { *m = ScaleIOVolumeSource{} }
 func (*ScaleIOVolumeSource) ProtoMessage() {}
 func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{185}
+	return fileDescriptor_6c07b07c062484ab, []int{191}
 }
 func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5260,7 +5428,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
 func (m *ScopeSelector) Reset()      { *m = ScopeSelector{} }
 func (*ScopeSelector) ProtoMessage() {}
 func (*ScopeSelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{186}
+	return fileDescriptor_6c07b07c062484ab, []int{192}
 }
 func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5288,7 +5456,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
 func (m *ScopedResourceSelectorRequirement) Reset()      { *m = ScopedResourceSelectorRequirement{} }
 func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
 func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{187}
+	return fileDescriptor_6c07b07c062484ab, []int{193}
 }
 func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5316,7 +5484,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
 func (m *SeccompProfile) Reset()      { *m = SeccompProfile{} }
 func (*SeccompProfile) ProtoMessage() {}
 func (*SeccompProfile) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{188}
+	return fileDescriptor_6c07b07c062484ab, []int{194}
 }
 func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5344,7 +5512,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
 func (m *Secret) Reset()      { *m = Secret{} }
 func (*Secret) ProtoMessage() {}
 func (*Secret) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{189}
+	return fileDescriptor_6c07b07c062484ab, []int{195}
 }
 func (m *Secret) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5372,7 +5540,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
 func (m *SecretEnvSource) Reset()      { *m = SecretEnvSource{} }
 func (*SecretEnvSource) ProtoMessage() {}
 func (*SecretEnvSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{190}
+	return fileDescriptor_6c07b07c062484ab, []int{196}
 }
 func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5400,7 +5568,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
 func (m *SecretKeySelector) Reset()      { *m = SecretKeySelector{} }
 func (*SecretKeySelector) ProtoMessage() {}
 func (*SecretKeySelector) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{191}
+	return fileDescriptor_6c07b07c062484ab, []int{197}
 }
 func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5428,7 +5596,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
 func (m *SecretList) Reset()      { *m = SecretList{} }
 func (*SecretList) ProtoMessage() {}
 func (*SecretList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{192}
+	return fileDescriptor_6c07b07c062484ab, []int{198}
 }
 func (m *SecretList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5456,7 +5624,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
 func (m *SecretProjection) Reset()      { *m = SecretProjection{} }
 func (*SecretProjection) ProtoMessage() {}
 func (*SecretProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{193}
+	return fileDescriptor_6c07b07c062484ab, []int{199}
 }
 func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5484,7 +5652,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
 func (m *SecretReference) Reset()      { *m = SecretReference{} }
 func (*SecretReference) ProtoMessage() {}
 func (*SecretReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{194}
+	return fileDescriptor_6c07b07c062484ab, []int{200}
 }
 func (m *SecretReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5512,7 +5680,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
 func (m *SecretVolumeSource) Reset()      { *m = SecretVolumeSource{} }
 func (*SecretVolumeSource) ProtoMessage() {}
 func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{195}
+	return fileDescriptor_6c07b07c062484ab, []int{201}
 }
 func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5540,7 +5708,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
 func (m *SecurityContext) Reset()      { *m = SecurityContext{} }
 func (*SecurityContext) ProtoMessage() {}
 func (*SecurityContext) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{196}
+	return fileDescriptor_6c07b07c062484ab, []int{202}
 }
 func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5568,7 +5736,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
 func (m *SerializedReference) Reset()      { *m = SerializedReference{} }
 func (*SerializedReference) ProtoMessage() {}
 func (*SerializedReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{197}
+	return fileDescriptor_6c07b07c062484ab, []int{203}
 }
 func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5596,7 +5764,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
 func (m *Service) Reset()      { *m = Service{} }
 func (*Service) ProtoMessage() {}
 func (*Service) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{198}
+	return fileDescriptor_6c07b07c062484ab, []int{204}
 }
 func (m *Service) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5624,7 +5792,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
 func (m *ServiceAccount) Reset()      { *m = ServiceAccount{} }
 func (*ServiceAccount) ProtoMessage() {}
 func (*ServiceAccount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{199}
+	return fileDescriptor_6c07b07c062484ab, []int{205}
 }
 func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5652,7 +5820,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
 func (m *ServiceAccountList) Reset()      { *m = ServiceAccountList{} }
 func (*ServiceAccountList) ProtoMessage() {}
 func (*ServiceAccountList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{200}
+	return fileDescriptor_6c07b07c062484ab, []int{206}
 }
 func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5680,7 +5848,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
 func (m *ServiceAccountTokenProjection) Reset()      { *m = ServiceAccountTokenProjection{} }
 func (*ServiceAccountTokenProjection) ProtoMessage() {}
 func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{201}
+	return fileDescriptor_6c07b07c062484ab, []int{207}
 }
 func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5708,7 +5876,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
 func (m *ServiceList) Reset()      { *m = ServiceList{} }
 func (*ServiceList) ProtoMessage() {}
 func (*ServiceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{202}
+	return fileDescriptor_6c07b07c062484ab, []int{208}
 }
 func (m *ServiceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5736,7 +5904,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
 func (m *ServicePort) Reset()      { *m = ServicePort{} }
 func (*ServicePort) ProtoMessage() {}
 func (*ServicePort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{203}
+	return fileDescriptor_6c07b07c062484ab, []int{209}
 }
 func (m *ServicePort) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5764,7 +5932,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
 func (m *ServiceProxyOptions) Reset()      { *m = ServiceProxyOptions{} }
 func (*ServiceProxyOptions) ProtoMessage() {}
 func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{204}
+	return fileDescriptor_6c07b07c062484ab, []int{210}
 }
 func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5792,7 +5960,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
 func (m *ServiceSpec) Reset()      { *m = ServiceSpec{} }
 func (*ServiceSpec) ProtoMessage() {}
 func (*ServiceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{205}
+	return fileDescriptor_6c07b07c062484ab, []int{211}
 }
 func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5820,7 +5988,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
 func (m *ServiceStatus) Reset()      { *m = ServiceStatus{} }
 func (*ServiceStatus) ProtoMessage() {}
 func (*ServiceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{206}
+	return fileDescriptor_6c07b07c062484ab, []int{212}
 }
 func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5848,7 +6016,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
 func (m *SessionAffinityConfig) Reset()      { *m = SessionAffinityConfig{} }
 func (*SessionAffinityConfig) ProtoMessage() {}
 func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{207}
+	return fileDescriptor_6c07b07c062484ab, []int{213}
 }
 func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5876,7 +6044,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
 func (m *SleepAction) Reset()      { *m = SleepAction{} }
 func (*SleepAction) ProtoMessage() {}
 func (*SleepAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{208}
+	return fileDescriptor_6c07b07c062484ab, []int{214}
 }
 func (m *SleepAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5904,7 +6072,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo
 func (m *StorageOSPersistentVolumeSource) Reset()      { *m = StorageOSPersistentVolumeSource{} }
 func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
 func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{209}
+	return fileDescriptor_6c07b07c062484ab, []int{215}
 }
 func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5932,7 +6100,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
 func (m *StorageOSVolumeSource) Reset()      { *m = StorageOSVolumeSource{} }
 func (*StorageOSVolumeSource) ProtoMessage() {}
 func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{210}
+	return fileDescriptor_6c07b07c062484ab, []int{216}
 }
 func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5960,7 +6128,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
 func (m *Sysctl) Reset()      { *m = Sysctl{} }
 func (*Sysctl) ProtoMessage() {}
 func (*Sysctl) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{211}
+	return fileDescriptor_6c07b07c062484ab, []int{217}
 }
 func (m *Sysctl) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -5988,7 +6156,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
 func (m *TCPSocketAction) Reset()      { *m = TCPSocketAction{} }
 func (*TCPSocketAction) ProtoMessage() {}
 func (*TCPSocketAction) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{212}
+	return fileDescriptor_6c07b07c062484ab, []int{218}
 }
 func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6016,7 +6184,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
 func (m *Taint) Reset()      { *m = Taint{} }
 func (*Taint) ProtoMessage() {}
 func (*Taint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{213}
+	return fileDescriptor_6c07b07c062484ab, []int{219}
 }
 func (m *Taint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6044,7 +6212,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
 func (m *Toleration) Reset()      { *m = Toleration{} }
 func (*Toleration) ProtoMessage() {}
 func (*Toleration) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{214}
+	return fileDescriptor_6c07b07c062484ab, []int{220}
 }
 func (m *Toleration) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6072,7 +6240,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
 func (m *TopologySelectorLabelRequirement) Reset()      { *m = TopologySelectorLabelRequirement{} }
 func (*TopologySelectorLabelRequirement) ProtoMessage() {}
 func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{215}
+	return fileDescriptor_6c07b07c062484ab, []int{221}
 }
 func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6100,7 +6268,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
 func (m *TopologySelectorTerm) Reset()      { *m = TopologySelectorTerm{} }
 func (*TopologySelectorTerm) ProtoMessage() {}
 func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{216}
+	return fileDescriptor_6c07b07c062484ab, []int{222}
 }
 func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6128,7 +6296,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
 func (m *TopologySpreadConstraint) Reset()      { *m = TopologySpreadConstraint{} }
 func (*TopologySpreadConstraint) ProtoMessage() {}
 func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{217}
+	return fileDescriptor_6c07b07c062484ab, []int{223}
 }
 func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6156,7 +6324,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
 func (m *TypedLocalObjectReference) Reset()      { *m = TypedLocalObjectReference{} }
 func (*TypedLocalObjectReference) ProtoMessage() {}
 func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{218}
+	return fileDescriptor_6c07b07c062484ab, []int{224}
 }
 func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6184,7 +6352,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
 func (m *TypedObjectReference) Reset()      { *m = TypedObjectReference{} }
 func (*TypedObjectReference) ProtoMessage() {}
 func (*TypedObjectReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{219}
+	return fileDescriptor_6c07b07c062484ab, []int{225}
 }
 func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6212,7 +6380,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
 func (m *Volume) Reset()      { *m = Volume{} }
 func (*Volume) ProtoMessage() {}
 func (*Volume) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{220}
+	return fileDescriptor_6c07b07c062484ab, []int{226}
 }
 func (m *Volume) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6240,7 +6408,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
 func (m *VolumeDevice) Reset()      { *m = VolumeDevice{} }
 func (*VolumeDevice) ProtoMessage() {}
 func (*VolumeDevice) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{221}
+	return fileDescriptor_6c07b07c062484ab, []int{227}
 }
 func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6268,7 +6436,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
 func (m *VolumeMount) Reset()      { *m = VolumeMount{} }
 func (*VolumeMount) ProtoMessage() {}
 func (*VolumeMount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{222}
+	return fileDescriptor_6c07b07c062484ab, []int{228}
 }
 func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6296,7 +6464,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
 func (m *VolumeMountStatus) Reset()      { *m = VolumeMountStatus{} }
 func (*VolumeMountStatus) ProtoMessage() {}
 func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{223}
+	return fileDescriptor_6c07b07c062484ab, []int{229}
 }
 func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6324,7 +6492,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
 func (m *VolumeNodeAffinity) Reset()      { *m = VolumeNodeAffinity{} }
 func (*VolumeNodeAffinity) ProtoMessage() {}
 func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{224}
+	return fileDescriptor_6c07b07c062484ab, []int{230}
 }
 func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6352,7 +6520,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
 func (m *VolumeProjection) Reset()      { *m = VolumeProjection{} }
 func (*VolumeProjection) ProtoMessage() {}
 func (*VolumeProjection) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{225}
+	return fileDescriptor_6c07b07c062484ab, []int{231}
 }
 func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6380,7 +6548,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
 func (m *VolumeResourceRequirements) Reset()      { *m = VolumeResourceRequirements{} }
 func (*VolumeResourceRequirements) ProtoMessage() {}
 func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{226}
+	return fileDescriptor_6c07b07c062484ab, []int{232}
 }
 func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6408,7 +6576,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
 func (m *VolumeSource) Reset()      { *m = VolumeSource{} }
 func (*VolumeSource) ProtoMessage() {}
 func (*VolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{227}
+	return fileDescriptor_6c07b07c062484ab, []int{233}
 }
 func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6436,7 +6604,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
 func (m *VsphereVirtualDiskVolumeSource) Reset()      { *m = VsphereVirtualDiskVolumeSource{} }
 func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
 func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{228}
+	return fileDescriptor_6c07b07c062484ab, []int{234}
 }
 func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6464,7 +6632,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
 func (m *WeightedPodAffinityTerm) Reset()      { *m = WeightedPodAffinityTerm{} }
 func (*WeightedPodAffinityTerm) ProtoMessage() {}
 func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{229}
+	return fileDescriptor_6c07b07c062484ab, []int{235}
 }
 func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6492,7 +6660,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
 func (m *WindowsSecurityContextOptions) Reset()      { *m = WindowsSecurityContextOptions{} }
 func (*WindowsSecurityContextOptions) ProtoMessage() {}
 func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_6c07b07c062484ab, []int{230}
+	return fileDescriptor_6c07b07c062484ab, []int{236}
 }
 func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -6551,9 +6719,12 @@ func init() {
 	proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
 	proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
 	proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
+	proto.RegisterType((*ContainerExtendedResourceRequest)(nil), "k8s.io.api.core.v1.ContainerExtendedResourceRequest")
 	proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
 	proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
 	proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
+	proto.RegisterType((*ContainerRestartRule)(nil), "k8s.io.api.core.v1.ContainerRestartRule")
+	proto.RegisterType((*ContainerRestartRuleOnExitCodes)(nil), "k8s.io.api.core.v1.ContainerRestartRuleOnExitCodes")
 	proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState")
 	proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
 	proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
@@ -6583,6 +6754,7 @@ func init() {
 	proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
 	proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
 	proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
+	proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector")
 	proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
 	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
 	proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
@@ -6671,10 +6843,12 @@ func init() {
 	proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
 	proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
 	proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
+	proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection")
 	proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
 	proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
 	proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
 	proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions")
+	proto.RegisterType((*PodExtendedResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodExtendedResourceClaimStatus")
 	proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP")
 	proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList")
 	proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
@@ -6787,1020 +6961,1049 @@ func init() {
 }
 
 var fileDescriptor_6c07b07c062484ab = []byte{
-	// 16206 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9,
-	0x75, 0x30, 0xc6, 0xea, 0x9e, 0xf3, 0xcd, 0x9d, 0xb8, 0x06, 0xb3, 0x00, 0x1a, 0x5b, 0xbb, 0x8b,
-	0xc5, 0x5e, 0x03, 0x62, 0x0f, 0x2e, 0xb8, 0xbb, 0x5c, 0xed, 0x9c, 0x40, 0x2f, 0x30, 0x83, 0xde,
-	0xec, 0x01, 0x40, 0x2e, 0x97, 0x14, 0x0b, 0xdd, 0x39, 0x33, 0xc5, 0xe9, 0xae, 0xea, 0xad, 0xaa,
-	0x1e, 0x60, 0x60, 0x2a, 0x24, 0x51, 0x16, 0x25, 0x52, 0x72, 0x04, 0x43, 0x21, 0x59, 0x0e, 0x4a,
-	0xa1, 0x1f, 0xba, 0x65, 0x5a, 0xb2, 0x68, 0xc9, 0x92, 0x2c, 0xea, 0xb2, 0x2d, 0x47, 0xc8, 0xfe,
-	0x21, 0x4b, 0x8a, 0x30, 0xa9, 0xb0, 0xc2, 0x23, 0x73, 0x6c, 0x87, 0x42, 0x3f, 0x2c, 0x29, 0x64,
-	0xff, 0xb0, 0x27, 0xf4, 0x7d, 0xfc, 0x22, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x07, 0x0b, 0x0c, 0x97,
-	0x8c, 0xfd, 0xd7, 0x9d, 0xef, 0xe5, 0xcb, 0xac, 0x3c, 0x5f, 0xbe, 0x13, 0xec, 0xad, 0x4b, 0xe1,
-	0xac, 0xeb, 0x5f, 0x70, 0x5a, 0xee, 0x85, 0x9a, 0x1f, 0x90, 0x0b, 0xdb, 0x17, 0x2f, 0x6c, 0x10,
-	0x8f, 0x04, 0x4e, 0x44, 0xea, 0xb3, 0xad, 0xc0, 0x8f, 0x7c, 0x84, 0x38, 0xce, 0xac, 0xd3, 0x72,
-	0x67, 0x29, 0xce, 0xec, 0xf6, 0xc5, 0x99, 0xe7, 0x36, 0xdc, 0x68, 0xb3, 0x7d, 0x7b, 0xb6, 0xe6,
-	0x37, 0x2f, 0x6c, 0xf8, 0x1b, 0xfe, 0x05, 0x86, 0x7a, 0xbb, 0xbd, 0xce, 0xfe, 0xb1, 0x3f, 0xec,
-	0x17, 0x27, 0x31, 0xf3, 0x62, 0xdc, 0x4c, 0xd3, 0xa9, 0x6d, 0xba, 0x1e, 0x09, 0x76, 0x2e, 0xb4,
-	0xb6, 0x36, 0x58, 0xbb, 0x01, 0x09, 0xfd, 0x76, 0x50, 0x23, 0xc9, 0x86, 0x3b, 0xd6, 0x0a, 0x2f,
-	0x34, 0x49, 0xe4, 0x64, 0x74, 0x77, 0xe6, 0x42, 0x5e, 0xad, 0xa0, 0xed, 0x45, 0x6e, 0x33, 0xdd,
-	0xcc, 0x47, 0xba, 0x55, 0x08, 0x6b, 0x9b, 0xa4, 0xe9, 0xa4, 0xea, 0xbd, 0x90, 0x57, 0xaf, 0x1d,
-	0xb9, 0x8d, 0x0b, 0xae, 0x17, 0x85, 0x51, 0x90, 0xac, 0x64, 0x7f, 0xd3, 0x82, 0xb3, 0x73, 0xb7,
-	0xaa, 0x4b, 0x0d, 0x27, 0x8c, 0xdc, 0xda, 0x7c, 0xc3, 0xaf, 0x6d, 0x55, 0x23, 0x3f, 0x20, 0x37,
-	0xfd, 0x46, 0xbb, 0x49, 0xaa, 0x6c, 0x20, 0xd0, 0xb3, 0x30, 0xb4, 0xcd, 0xfe, 0x97, 0x17, 0xa7,
-	0xad, 0xb3, 0xd6, 0xf9, 0xe1, 0xf9, 0xc9, 0xbf, 0xd8, 0x2d, 0x7d, 0x68, 0x6f, 0xb7, 0x34, 0x74,
-	0x53, 0x94, 0x63, 0x85, 0x81, 0xce, 0xc1, 0xc0, 0x7a, 0xb8, 0xb6, 0xd3, 0x22, 0xd3, 0x05, 0x86,
-	0x3b, 0x2e, 0x70, 0x07, 0x96, 0xab, 0xb4, 0x14, 0x0b, 0x28, 0xba, 0x00, 0xc3, 0x2d, 0x27, 0x88,
-	0xdc, 0xc8, 0xf5, 0xbd, 0xe9, 0xe2, 0x59, 0xeb, 0x7c, 0xff, 0xfc, 0x94, 0x40, 0x1d, 0xae, 0x48,
-	0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0xd4, 0xaf, 0x7b, 0x8d, 0x9d, 0xe9, 0xbe, 0xb3, 0xd6,
-	0xf9, 0xa1, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xa5, 0x00, 0x43, 0x73, 0xeb, 0xeb,
-	0xae, 0xe7, 0x46, 0x3b, 0xe8, 0x26, 0x8c, 0x7a, 0x7e, 0x9d, 0xc8, 0xff, 0xec, 0x2b, 0x46, 0x9e,
-	0x3f, 0x3b, 0x9b, 0x5e, 0x4a, 0xb3, 0xab, 0x1a, 0xde, 0xfc, 0xe4, 0xde, 0x6e, 0x69, 0x54, 0x2f,
-	0xc1, 0x06, 0x1d, 0x84, 0x61, 0xa4, 0xe5, 0xd7, 0x15, 0xd9, 0x02, 0x23, 0x5b, 0xca, 0x22, 0x5b,
-	0x89, 0xd1, 0xe6, 0x27, 0xf6, 0x76, 0x4b, 0x23, 0x5a, 0x01, 0xd6, 0x89, 0xa0, 0xdb, 0x30, 0x41,
-	0xff, 0x7a, 0x91, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0xc7, 0xf2, 0xe8, 0x6a, 0xa8, 0xf3, 0x47, 0xf6,
-	0x76, 0x4b, 0x13, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xff, 0xa4, 0x05, 0x13, 0x73, 0xad, 0xd6, 0x5c,
-	0xd0, 0xf4, 0x83, 0x4a, 0xe0, 0xaf, 0xbb, 0x0d, 0x82, 0x5e, 0x86, 0xbe, 0x88, 0xce, 0x1a, 0x9f,
-	0xe1, 0xc7, 0xc4, 0xd0, 0xf6, 0xd1, 0xb9, 0xda, 0xdf, 0x2d, 0x1d, 0x49, 0xa0, 0xb3, 0xa9, 0x64,
-	0x15, 0xd0, 0x1b, 0x30, 0xd9, 0xf0, 0x6b, 0x4e, 0x63, 0xd3, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f,
-	0x74, 0x6f, 0xb7, 0x34, 0x79, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb, 0x1e, 0x8c, 0xcf, 0x45, 0x91,
-	0x53, 0xdb, 0x24, 0x75, 0xbe, 0xa0, 0xd0, 0x8b, 0xd0, 0xe7, 0x39, 0x4d, 0xd9, 0x99, 0xb3, 0xb2,
-	0x33, 0xab, 0x4e, 0x93, 0x76, 0x66, 0xf2, 0x86, 0xe7, 0xbe, 0xdb, 0x16, 0x8b, 0x94, 0x96, 0x61,
-	0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x6d, 0xb7, 0x46, 0x2a, 0x4e, 0xb4, 0x29, 0xfa, 0x80, 0x44,
-	0x5d, 0x58, 0x54, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x78, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf,
-	0x1e, 0xa2, 0x2d, 0x98, 0x68, 0x05, 0x64, 0x9d, 0x04, 0xaa, 0x68, 0xda, 0x3a, 0x5b, 0x3c, 0x3f,
-	0xf2, 0xfc, 0xf9, 0xcc, 0xb1, 0x37, 0x51, 0x97, 0xbc, 0x28, 0xd8, 0x99, 0x3f, 0x21, 0xda, 0x9b,
-	0x48, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0xe7, 0x05, 0x38, 0x36, 0x77, 0xaf, 0x1d, 0x90, 0x45, 0x37,
-	0xdc, 0x4a, 0x6e, 0xb8, 0xba, 0x1b, 0x6e, 0xad, 0xc6, 0x23, 0xa0, 0x56, 0xfa, 0xa2, 0x28, 0xc7,
-	0x0a, 0x03, 0x3d, 0x07, 0x83, 0xf4, 0xf7, 0x0d, 0x5c, 0x16, 0x9f, 0x7c, 0x44, 0x20, 0x8f, 0x2c,
-	0x3a, 0x91, 0xb3, 0xc8, 0x41, 0x58, 0xe2, 0xa0, 0x15, 0x18, 0xa9, 0xb1, 0xf3, 0x61, 0x63, 0xc5,
-	0xaf, 0x13, 0xb6, 0xb6, 0x86, 0xe7, 0x9f, 0xa1, 0xe8, 0x0b, 0x71, 0xf1, 0xfe, 0x6e, 0x69, 0x9a,
-	0xf7, 0x4d, 0x90, 0xd0, 0x60, 0x58, 0xaf, 0x8f, 0x6c, 0xb5, 0xdd, 0xfb, 0x18, 0x25, 0xc8, 0xd8,
-	0xea, 0xe7, 0xb5, 0x9d, 0xdb, 0xcf, 0x76, 0xee, 0x68, 0xf6, 0xae, 0x45, 0x17, 0xa1, 0x6f, 0xcb,
-	0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd6, 0x69, 0x3a, 0xe7, 0x57, 0x5d, 0xaf, 0xbe, 0xbf, 0x5b, 0x9a,
-	0x32, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0x1f, 0x0b, 0x4a, 0x0c, 0xb6, 0xec, 0x36, 0x48,
-	0x85, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x1e, 0x20, 0x24, 0xb5, 0x80, 0x44,
-	0xda, 0x90, 0xaa, 0x85, 0x51, 0x55, 0x10, 0xac, 0x61, 0xd1, 0xf3, 0x29, 0xdc, 0x74, 0x02, 0xb6,
-	0xbe, 0xc4, 0xc0, 0xaa, 0xf3, 0xa9, 0x2a, 0x01, 0x38, 0xc6, 0x31, 0xce, 0xa7, 0x62, 0xb7, 0xf3,
-	0x09, 0x7d, 0x0c, 0x26, 0xe2, 0xc6, 0xc2, 0x96, 0x53, 0x93, 0x03, 0xc8, 0x76, 0x70, 0xd5, 0x04,
-	0xe1, 0x24, 0xae, 0xfd, 0x9f, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, 0x7e, 0x9f, 0x7f, 0xab, 0xfd, 0x07,
-	0x16, 0x0c, 0xce, 0xbb, 0x5e, 0xdd, 0xf5, 0x36, 0xd0, 0x67, 0x60, 0x88, 0x5e, 0x95, 0x75, 0x27,
-	0x72, 0xc4, 0x31, 0xfc, 0x61, 0x6d, 0x6f, 0xa9, 0x9b, 0x6b, 0xb6, 0xb5, 0xb5, 0x41, 0x0b, 0xc2,
-	0x59, 0x8a, 0x4d, 0x77, 0xdb, 0xf5, 0xdb, 0x9f, 0x25, 0xb5, 0x68, 0x85, 0x44, 0x4e, 0xfc, 0x39,
-	0x71, 0x19, 0x56, 0x54, 0xd1, 0x55, 0x18, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xe2, 0x3c, 0xce, 0x3c,
-	0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x6a, 0x24, 0xbe, 0xa5, 0xd6, 0x58, 0x55, 0x2c, 0x48,
-	0xd8, 0xff, 0x6e, 0x10, 0x4e, 0x2e, 0x54, 0xcb, 0x39, 0xeb, 0xea, 0x1c, 0x0c, 0xd4, 0x03, 0x77,
-	0x9b, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x45, 0x56, 0x8a, 0x05, 0x14, 0x5d, 0x82, 0x51, 0x7e, 0x3f,
-	0x5e, 0x71, 0xbc, 0x7a, 0x7c, 0x3c, 0x0a, 0xec, 0xd1, 0x9b, 0x1a, 0x0c, 0x1b, 0x98, 0x07, 0x5c,
-	0x54, 0xe7, 0x12, 0x9b, 0x31, 0xef, 0xee, 0xfd, 0xa2, 0x05, 0x93, 0xbc, 0x99, 0xb9, 0x28, 0x0a,
-	0xdc, 0xdb, 0xed, 0x88, 0x84, 0xd3, 0xfd, 0xec, 0xa4, 0x5b, 0xc8, 0x1a, 0xad, 0xdc, 0x11, 0x98,
-	0xbd, 0x99, 0xa0, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee, 0x64, 0x12, 0x8c, 0x53, 0xcd, 0xa2, 0x1f,
-	0xb1, 0x60, 0xa6, 0xe6, 0x7b, 0x51, 0xe0, 0x37, 0x1a, 0x24, 0xa8, 0xb4, 0x6f, 0x37, 0xdc, 0x70,
-	0x93, 0xaf, 0x53, 0x4c, 0xd6, 0xd9, 0x49, 0x90, 0x33, 0x87, 0x0a, 0x49, 0xcc, 0xe1, 0x99, 0xbd,
-	0xdd, 0xd2, 0xcc, 0x42, 0x2e, 0x29, 0xdc, 0xa1, 0x19, 0xb4, 0x05, 0x88, 0xde, 0xec, 0xd5, 0xc8,
-	0xd9, 0x20, 0x71, 0xe3, 0x83, 0xbd, 0x37, 0x7e, 0x7c, 0x6f, 0xb7, 0x84, 0x56, 0x53, 0x24, 0x70,
-	0x06, 0x59, 0xf4, 0x2e, 0x1c, 0xa5, 0xa5, 0xa9, 0x6f, 0x1d, 0xea, 0xbd, 0xb9, 0xe9, 0xbd, 0xdd,
-	0xd2, 0xd1, 0xd5, 0x0c, 0x22, 0x38, 0x93, 0x34, 0xfa, 0x21, 0x0b, 0x4e, 0xc6, 0x9f, 0xbf, 0x74,
-	0xb7, 0xe5, 0x78, 0xf5, 0xb8, 0xe1, 0xe1, 0xde, 0x1b, 0xa6, 0x67, 0xf2, 0xc9, 0x85, 0x3c, 0x4a,
-	0x38, 0xbf, 0x11, 0xe4, 0xc1, 0x11, 0xda, 0xb5, 0x64, 0xdb, 0xd0, 0x7b, 0xdb, 0x27, 0xf6, 0x76,
-	0x4b, 0x47, 0x56, 0xd3, 0x34, 0x70, 0x16, 0xe1, 0x99, 0x05, 0x38, 0x96, 0xb9, 0x3a, 0xd1, 0x24,
-	0x14, 0xb7, 0x08, 0x67, 0x02, 0x87, 0x31, 0xfd, 0x89, 0x8e, 0x42, 0xff, 0xb6, 0xd3, 0x68, 0x8b,
-	0x8d, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x97, 0x2c, 0xfb, 0x7f, 0x28, 0xc2, 0xc4, 0x42, 0xb5, 0x7c,
-	0x5f, 0xbb, 0x5e, 0xbf, 0xf6, 0x0a, 0x1d, 0xaf, 0xbd, 0xf8, 0x12, 0x2d, 0xe6, 0x5e, 0xa2, 0x3f,
-	0x98, 0xb1, 0x65, 0xfb, 0xd8, 0x96, 0xfd, 0x68, 0xce, 0x96, 0x7d, 0xc0, 0x1b, 0x75, 0x3b, 0x67,
-	0xd5, 0xf6, 0xb3, 0x09, 0xcc, 0xe4, 0x90, 0x18, 0xef, 0x97, 0x3c, 0x6a, 0x0f, 0xb8, 0x74, 0x1f,
-	0xcc, 0x3c, 0xd6, 0x60, 0x74, 0xc1, 0x69, 0x39, 0xb7, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f,
-	0x42, 0xd1, 0xa9, 0xd7, 0x19, 0x77, 0x37, 0x3c, 0x7f, 0x6c, 0x6f, 0xb7, 0x54, 0x9c, 0xab, 0x53,
-	0x36, 0x03, 0x14, 0xd6, 0x0e, 0xa6, 0x18, 0xe8, 0x69, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0,
-	0x30, 0xe9, 0x2e, 0xef, 0x5b, 0x0c, 0xfc, 0x56, 0x02, 0x95, 0xe1, 0xd8, 0x7f, 0x56, 0x80, 0x53,
-	0x0b, 0xa4, 0xb5, 0xb9, 0x5c, 0xcd, 0xb9, 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f,
-	0x42, 0xd1, 0x34, 0x5b, 0x11, 0x2b, 0xa2, 0x0c, 0x2b, 0x28, 0x3a, 0x0b, 0x7d, 0xad, 0x98, 0x89,
-	0x1d, 0x95, 0x0c, 0x30, 0x63, 0x5f, 0x19, 0x84, 0x62, 0xb4, 0x43, 0x12, 0x88, 0x15, 0xa3, 0x30,
-	0x6e, 0x84, 0x24, 0xc0, 0x0c, 0x12, 0x73, 0x02, 0x94, 0x47, 0x10, 0x37, 0x42, 0x82, 0x13, 0xa0,
-	0x10, 0xac, 0x61, 0xa1, 0x0a, 0x0c, 0x87, 0x89, 0x99, 0xed, 0x69, 0x6b, 0x8e, 0x31, 0x56, 0x41,
-	0xcd, 0x64, 0x4c, 0xc4, 0xb8, 0xc1, 0x06, 0xba, 0xb2, 0x0a, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8,
-	0x5d, 0x36, 0x70, 0x37, 0xd2, 0x03, 0xd7, 0xfb, 0x96, 0x78, 0x50, 0xa3, 0xf7, 0xff, 0x5a, 0x70,
-	0x6a, 0xc1, 0xf5, 0xea, 0x24, 0xc8, 0x59, 0x80, 0x0f, 0xe7, 0x29, 0x7f, 0x30, 0x26, 0xc5, 0x58,
-	0x62, 0x7d, 0x0f, 0x60, 0x89, 0xd9, 0xff, 0x6c, 0x01, 0xe2, 0x9f, 0xfd, 0xbe, 0xfb, 0xd8, 0x1b,
-	0xe9, 0x8f, 0x7d, 0x00, 0xcb, 0xc2, 0xbe, 0x06, 0xe3, 0x0b, 0x0d, 0x97, 0x78, 0x51, 0xb9, 0xb2,
-	0xe0, 0x7b, 0xeb, 0xee, 0x06, 0x7a, 0x05, 0xc6, 0x23, 0xb7, 0x49, 0xfc, 0x76, 0x54, 0x25, 0x35,
-	0xdf, 0x63, 0x2f, 0x57, 0xeb, 0x7c, 0xff, 0x3c, 0xda, 0xdb, 0x2d, 0x8d, 0xaf, 0x19, 0x10, 0x9c,
-	0xc0, 0xb4, 0x7f, 0x95, 0x9e, 0x5b, 0x8d, 0x76, 0x18, 0x91, 0x60, 0x2d, 0x68, 0x87, 0xd1, 0x7c,
-	0x9b, 0xf2, 0x9e, 0x95, 0xc0, 0xa7, 0xdd, 0x71, 0x7d, 0x0f, 0x9d, 0x32, 0x9e, 0xe3, 0x43, 0xf2,
-	0x29, 0x2e, 0x9e, 0xdd, 0xb3, 0x00, 0xa1, 0xbb, 0xe1, 0x91, 0x40, 0x7b, 0x3e, 0x8c, 0xb3, 0xad,
-	0xa2, 0x4a, 0xb1, 0x86, 0x81, 0x1a, 0x30, 0xd6, 0x70, 0x6e, 0x93, 0x46, 0x95, 0x34, 0x48, 0x2d,
-	0xf2, 0x03, 0x21, 0xdf, 0x78, 0xa1, 0xb7, 0x77, 0xc0, 0x35, 0xbd, 0xea, 0xfc, 0xd4, 0xde, 0x6e,
-	0x69, 0xcc, 0x28, 0xc2, 0x26, 0x71, 0x7a, 0x74, 0xf8, 0x2d, 0xfa, 0x15, 0x4e, 0x43, 0x7f, 0x7c,
-	0x5e, 0x17, 0x65, 0x58, 0x41, 0xd5, 0xd1, 0xd1, 0x97, 0x77, 0x74, 0xd8, 0x7f, 0x47, 0x17, 0x9a,
-	0xdf, 0x6c, 0xf9, 0x1e, 0xf1, 0xa2, 0x05, 0xdf, 0xab, 0x73, 0xc9, 0xd4, 0x2b, 0x86, 0xe8, 0xe4,
-	0x5c, 0x42, 0x74, 0x72, 0x3c, 0x5d, 0x43, 0x93, 0x9e, 0x7c, 0x14, 0x06, 0xc2, 0xc8, 0x89, 0xda,
-	0xa1, 0x18, 0xb8, 0x47, 0xe5, 0xb2, 0xab, 0xb2, 0xd2, 0xfd, 0xdd, 0xd2, 0x84, 0xaa, 0xc6, 0x8b,
-	0xb0, 0xa8, 0x80, 0x9e, 0x82, 0xc1, 0x26, 0x09, 0x43, 0x67, 0x43, 0xb2, 0x0d, 0x13, 0xa2, 0xee,
-	0xe0, 0x0a, 0x2f, 0xc6, 0x12, 0x8e, 0x1e, 0x83, 0x7e, 0x12, 0x04, 0x7e, 0x20, 0xbe, 0x6d, 0x4c,
-	0x20, 0xf6, 0x2f, 0xd1, 0x42, 0xcc, 0x61, 0xf6, 0xff, 0x6c, 0xc1, 0x84, 0xea, 0x2b, 0x6f, 0xeb,
-	0x10, 0x9e, 0x6b, 0x6f, 0x03, 0xd4, 0xe4, 0x07, 0x86, 0xec, 0x9a, 0x1d, 0x79, 0xfe, 0x5c, 0x26,
-	0x47, 0x93, 0x1a, 0xc6, 0x98, 0xb2, 0x2a, 0x0a, 0xb1, 0x46, 0xcd, 0xfe, 0x63, 0x0b, 0x8e, 0x24,
-	0xbe, 0xe8, 0x9a, 0x1b, 0x46, 0xe8, 0x9d, 0xd4, 0x57, 0xcd, 0xf6, 0xb8, 0xf8, 0xdc, 0x90, 0x7f,
-	0x93, 0xda, 0xf3, 0xb2, 0x44, 0xfb, 0xa2, 0x2b, 0xd0, 0xef, 0x46, 0xa4, 0x29, 0x3f, 0xe6, 0xb1,
-	0x8e, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x99, 0xd6, 0xc4, 0x9c, 0x80, 0xfd, 0x67, 0x45, 0x18,
-	0xe6, 0xfb, 0x7b, 0xc5, 0x69, 0x1d, 0xc2, 0x5c, 0x3c, 0x03, 0xc3, 0x6e, 0xb3, 0xd9, 0x8e, 0x9c,
-	0xdb, 0xe2, 0xde, 0x1b, 0xe2, 0x67, 0x50, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0xca, 0xd0, 0xc7, 0xba,
-	0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14, 0x7d, 0x9f, 0x5d, 0x74, 0x22, 0x87, 0xb3, 0x9c, 0x6a,
-	0x5f, 0xd1, 0x22, 0xcc, 0x48, 0x20, 0x07, 0xe0, 0xb6, 0xeb, 0x39, 0xc1, 0x0e, 0x2d, 0x9b, 0x2e,
-	0x32, 0x82, 0xcf, 0x75, 0x26, 0x38, 0xaf, 0xf0, 0x39, 0x59, 0xf5, 0x61, 0x31, 0x00, 0x6b, 0x44,
-	0x67, 0x5e, 0x86, 0x61, 0x85, 0x7c, 0x10, 0xce, 0x71, 0xe6, 0x63, 0x30, 0x91, 0x68, 0xab, 0x5b,
-	0xf5, 0x51, 0x9d, 0xf1, 0xfc, 0x43, 0x76, 0x64, 0x88, 0x5e, 0x2f, 0x79, 0xdb, 0xe2, 0x6e, 0xba,
-	0x07, 0x47, 0x1b, 0x19, 0x47, 0xbe, 0x98, 0xd7, 0xde, 0xaf, 0x88, 0x53, 0xe2, 0xb3, 0x8f, 0x66,
-	0x41, 0x71, 0x66, 0x1b, 0xc6, 0x89, 0x58, 0xe8, 0x74, 0x22, 0xd2, 0xf3, 0xee, 0xa8, 0xea, 0xfc,
-	0x55, 0xb2, 0xa3, 0x0e, 0xd5, 0xef, 0x64, 0xf7, 0x4f, 0xf3, 0xd1, 0xe7, 0xc7, 0xe5, 0x88, 0x20,
-	0x50, 0xbc, 0x4a, 0x76, 0xf8, 0x54, 0xe8, 0x5f, 0x57, 0xec, 0xf8, 0x75, 0x5f, 0xb3, 0x60, 0x4c,
-	0x7d, 0xdd, 0x21, 0x9c, 0x0b, 0xf3, 0xe6, 0xb9, 0x70, 0xba, 0xe3, 0x02, 0xcf, 0x39, 0x11, 0xbe,
-	0x5e, 0x80, 0x93, 0x0a, 0x87, 0x3e, 0xa2, 0xf8, 0x1f, 0xb1, 0xaa, 0x2e, 0xc0, 0xb0, 0xa7, 0xc4,
-	0x89, 0x96, 0x29, 0xc7, 0x8b, 0x85, 0x89, 0x31, 0x0e, 0xbd, 0xf2, 0xbc, 0xf8, 0xd2, 0x1e, 0xd5,
-	0xe5, 0xec, 0xe2, 0x72, 0x9f, 0x87, 0x62, 0xdb, 0xad, 0x8b, 0x0b, 0xe6, 0xc3, 0x72, 0xb4, 0x6f,
-	0x94, 0x17, 0xf7, 0x77, 0x4b, 0x8f, 0xe6, 0xa9, 0x9c, 0xe8, 0xcd, 0x16, 0xce, 0xde, 0x28, 0x2f,
-	0x62, 0x5a, 0x19, 0xcd, 0xc1, 0x84, 0xd4, 0xaa, 0xdd, 0xa4, 0x7c, 0xa9, 0xef, 0x89, 0x7b, 0x48,
-	0x09, 0xcb, 0xb1, 0x09, 0xc6, 0x49, 0x7c, 0xb4, 0x08, 0x93, 0x5b, 0xed, 0xdb, 0xa4, 0x41, 0x22,
-	0xfe, 0xc1, 0x57, 0x09, 0x17, 0x25, 0x0f, 0xc7, 0x4f, 0xd8, 0xab, 0x09, 0x38, 0x4e, 0xd5, 0xb0,
-	0xbf, 0xcd, 0xee, 0x03, 0x31, 0x7a, 0x1a, 0x7f, 0xf3, 0x9d, 0x5c, 0xce, 0xbd, 0xac, 0x8a, 0xab,
-	0x64, 0x67, 0xcd, 0xa7, 0x7c, 0x48, 0xf6, 0xaa, 0x30, 0xd6, 0x7c, 0x5f, 0xc7, 0x35, 0xff, 0xbb,
-	0x05, 0x38, 0xa6, 0x46, 0xc0, 0xe0, 0x96, 0xbf, 0xdb, 0xc7, 0xe0, 0x22, 0x8c, 0xd4, 0xc9, 0xba,
-	0xd3, 0x6e, 0x44, 0x4a, 0xaf, 0xd1, 0xcf, 0x55, 0x6d, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0x0e, 0x30,
-	0x6c, 0xbf, 0x39, 0xc6, 0x2e, 0xe2, 0xc8, 0xa1, 0x6b, 0x5c, 0xed, 0x1a, 0x2b, 0x77, 0xd7, 0x3c,
-	0x06, 0xfd, 0x6e, 0x93, 0x32, 0x66, 0x05, 0x93, 0xdf, 0x2a, 0xd3, 0x42, 0xcc, 0x61, 0xe8, 0x09,
-	0x18, 0xac, 0xf9, 0xcd, 0xa6, 0xe3, 0xd5, 0xd9, 0x95, 0x37, 0x3c, 0x3f, 0x42, 0x79, 0xb7, 0x05,
-	0x5e, 0x84, 0x25, 0x8c, 0x32, 0xdf, 0x4e, 0xb0, 0xc1, 0x85, 0x3d, 0x82, 0xf9, 0x9e, 0x0b, 0x36,
-	0x42, 0xcc, 0x4a, 0xe9, 0x5b, 0xf5, 0x8e, 0x1f, 0x6c, 0xb9, 0xde, 0xc6, 0xa2, 0x1b, 0x88, 0x2d,
-	0xa1, 0xee, 0xc2, 0x5b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x0c, 0xfd, 0x2d, 0x3f, 0x88, 0xc2, 0xe9,
-	0x01, 0x36, 0xdc, 0x8f, 0xe6, 0x1c, 0x44, 0xfc, 0x6b, 0x2b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff,
-	0x85, 0x98, 0x57, 0x47, 0xd7, 0x60, 0x90, 0x78, 0xdb, 0xcb, 0x81, 0xdf, 0x9c, 0x3e, 0x92, 0x4f,
-	0x69, 0x89, 0xa3, 0xf0, 0x65, 0x16, 0xf3, 0xa8, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0xa3, 0x50, 0x24,
-	0xde, 0xf6, 0xf4, 0x20, 0xa3, 0x34, 0x93, 0x43, 0xe9, 0xa6, 0x13, 0xc4, 0x67, 0xfe, 0x92, 0xb7,
-	0x8d, 0x69, 0x1d, 0xf4, 0x09, 0x18, 0x96, 0x07, 0x46, 0x28, 0xa4, 0xa8, 0x99, 0x0b, 0x56, 0x1e,
-	0x33, 0x98, 0xbc, 0xdb, 0x76, 0x03, 0xd2, 0x24, 0x5e, 0x14, 0xc6, 0x27, 0xa4, 0x84, 0x86, 0x38,
-	0xa6, 0x86, 0x6a, 0x30, 0x1a, 0x90, 0xd0, 0xbd, 0x47, 0x2a, 0x7e, 0xc3, 0xad, 0xed, 0x4c, 0x9f,
-	0x60, 0xdd, 0x7b, 0xaa, 0xe3, 0x90, 0x61, 0xad, 0x42, 0x2c, 0xe5, 0xd7, 0x4b, 0xb1, 0x41, 0x14,
-	0xbd, 0x05, 0x63, 0x01, 0x09, 0x23, 0x27, 0x88, 0x44, 0x2b, 0xd3, 0x4a, 0x2b, 0x37, 0x86, 0x75,
-	0x00, 0x7f, 0x4e, 0xc4, 0xcd, 0xc4, 0x10, 0x6c, 0x52, 0x40, 0x9f, 0x90, 0x2a, 0x87, 0x15, 0xbf,
-	0xed, 0x45, 0xe1, 0xf4, 0x30, 0xeb, 0x77, 0xa6, 0x6e, 0xfa, 0x66, 0x8c, 0x97, 0xd4, 0x49, 0xf0,
-	0xca, 0xd8, 0x20, 0x85, 0x3e, 0x05, 0x63, 0xfc, 0x3f, 0x57, 0xa9, 0x86, 0xd3, 0xc7, 0x18, 0xed,
-	0xb3, 0xf9, 0xb4, 0x39, 0xe2, 0xfc, 0x31, 0x41, 0x7c, 0x4c, 0x2f, 0x0d, 0xb1, 0x49, 0x0d, 0x61,
-	0x18, 0x6b, 0xb8, 0xdb, 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0xdb, 0x44, 0x48, 0x88, 0x4f, 0x66,
-	0xab, 0x60, 0xfd, 0xdb, 0x44, 0x3c, 0x02, 0xf5, 0x3a, 0xd8, 0x24, 0x81, 0x6e, 0xc0, 0x38, 0x7d,
-	0x92, 0xbb, 0x31, 0xd1, 0x91, 0x6e, 0x44, 0xd9, 0xc3, 0x19, 0x1b, 0x95, 0x70, 0x82, 0x08, 0xba,
-	0x0e, 0xa3, 0x6c, 0xcc, 0xdb, 0x2d, 0x4e, 0xf4, 0x78, 0x37, 0xa2, 0xcc, 0xa0, 0xa0, 0xaa, 0x55,
-	0xc1, 0x06, 0x01, 0xf4, 0x26, 0x0c, 0x37, 0xdc, 0x75, 0x52, 0xdb, 0xa9, 0x35, 0xc8, 0xf4, 0x28,
-	0xa3, 0x96, 0x79, 0x18, 0x5e, 0x93, 0x48, 0x9c, 0x3f, 0x57, 0x7f, 0x71, 0x5c, 0x1d, 0xdd, 0x84,
-	0xe3, 0x11, 0x09, 0x9a, 0xae, 0xe7, 0xd0, 0x43, 0x4c, 0x3c, 0x09, 0x99, 0x66, 0x7c, 0x8c, 0xad,
-	0xae, 0x33, 0x62, 0x36, 0x8e, 0xaf, 0x65, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x2e, 0x4c, 0x67, 0x40,
-	0xf8, 0xba, 0x3d, 0xca, 0x28, 0xbf, 0x26, 0x28, 0x4f, 0xaf, 0xe5, 0xe0, 0xed, 0x77, 0x80, 0xe1,
-	0x5c, 0xea, 0xe8, 0x3a, 0x4c, 0xb0, 0x93, 0xb3, 0xd2, 0x6e, 0x34, 0x44, 0x83, 0xe3, 0xac, 0xc1,
-	0x27, 0x24, 0x1f, 0x51, 0x36, 0xc1, 0xfb, 0xbb, 0x25, 0x88, 0xff, 0xe1, 0x64, 0x6d, 0x74, 0x9b,
-	0x29, 0x61, 0xdb, 0x81, 0x1b, 0xed, 0xd0, 0x5d, 0x45, 0xee, 0x46, 0xd3, 0x13, 0x1d, 0x05, 0x52,
-	0x3a, 0xaa, 0xd2, 0xd4, 0xea, 0x85, 0x38, 0x49, 0x90, 0x5e, 0x05, 0x61, 0x54, 0x77, 0xbd, 0xe9,
-	0x49, 0xfe, 0x9e, 0x92, 0x27, 0x69, 0x95, 0x16, 0x62, 0x0e, 0x63, 0x0a, 0x58, 0xfa, 0xe3, 0x3a,
-	0xbd, 0x71, 0xa7, 0x18, 0x62, 0xac, 0x80, 0x95, 0x00, 0x1c, 0xe3, 0x50, 0x26, 0x38, 0x8a, 0x76,
-	0xa6, 0x11, 0x43, 0x55, 0x07, 0xe2, 0xda, 0xda, 0x27, 0x30, 0x2d, 0xb7, 0x6f, 0xc3, 0xb8, 0x3a,
-	0x26, 0xd8, 0x98, 0xa0, 0x12, 0xf4, 0x33, 0xb6, 0x4f, 0x88, 0x4f, 0x87, 0x69, 0x17, 0x18, 0x4b,
-	0x88, 0x79, 0x39, 0xeb, 0x82, 0x7b, 0x8f, 0xcc, 0xef, 0x44, 0x84, 0xcb, 0x22, 0x8a, 0x5a, 0x17,
-	0x24, 0x00, 0xc7, 0x38, 0xf6, 0xbf, 0xe7, 0xec, 0x73, 0x7c, 0x4b, 0xf4, 0x70, 0x2f, 0x3e, 0x0b,
-	0x43, 0xcc, 0xf0, 0xc3, 0x0f, 0xb8, 0x76, 0xb6, 0x3f, 0x66, 0x98, 0xaf, 0x88, 0x72, 0xac, 0x30,
-	0xd0, 0xab, 0x30, 0x56, 0xd3, 0x1b, 0x10, 0x97, 0xba, 0x3a, 0x46, 0x8c, 0xd6, 0xb1, 0x89, 0x8b,
-	0x2e, 0xc1, 0x10, 0xb3, 0x71, 0xaa, 0xf9, 0x0d, 0xc1, 0x6d, 0x4a, 0xce, 0x64, 0xa8, 0x22, 0xca,
-	0xf7, 0xb5, 0xdf, 0x58, 0x61, 0xa3, 0x73, 0x30, 0x40, 0xbb, 0x50, 0xae, 0x88, 0xeb, 0x54, 0x49,
-	0x02, 0xaf, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0x1f, 0x5b, 0x8c, 0x97, 0x4a, 0x9f, 0xf9, 0xe8, 0x0a,
-	0xbb, 0x34, 0xd8, 0x0d, 0xa2, 0x69, 0xe1, 0x1f, 0xd7, 0x6e, 0x02, 0x05, 0xdb, 0x4f, 0xfc, 0xc7,
-	0x46, 0x4d, 0xf4, 0x76, 0xf2, 0x66, 0xe0, 0x0c, 0xc5, 0x8b, 0x72, 0x08, 0x92, 0xb7, 0xc3, 0x23,
-	0xf1, 0x15, 0x47, 0xfb, 0xd3, 0xe9, 0x8a, 0xb0, 0x7f, 0xaa, 0xa0, 0xad, 0x92, 0x6a, 0xe4, 0x44,
-	0x04, 0x55, 0x60, 0xf0, 0x8e, 0xe3, 0x46, 0xae, 0xb7, 0x21, 0xf8, 0xbe, 0xce, 0x17, 0x1d, 0xab,
-	0x74, 0x8b, 0x57, 0xe0, 0xdc, 0x8b, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0x68, 0x7b, 0x1e, 0xa5,
-	0x58, 0xe8, 0x95, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, 0x1d, 0x00, 0x79,
-	0x42, 0x90, 0xba, 0x90, 0x1d, 0x3e, 0xdb, 0x9d, 0xe8, 0x9a, 0xaa, 0xc3, 0x85, 0x93, 0xf1, 0x7f,
-	0xac, 0xd1, 0xb3, 0x23, 0x6d, 0x4e, 0xf5, 0xce, 0xa0, 0x4f, 0xd2, 0x2d, 0xea, 0x04, 0x11, 0xa9,
-	0xcf, 0x45, 0x62, 0x70, 0x9e, 0xee, 0xed, 0x71, 0xb8, 0xe6, 0x36, 0x89, 0xbe, 0x9d, 0x05, 0x11,
-	0x1c, 0xd3, 0xb3, 0x7f, 0xbf, 0x08, 0xd3, 0x79, 0xdd, 0xa5, 0x9b, 0x86, 0xdc, 0x75, 0xa3, 0x05,
-	0xca, 0xd6, 0x5a, 0xe6, 0xa6, 0x59, 0x12, 0xe5, 0x58, 0x61, 0xd0, 0xd5, 0x1b, 0xba, 0x1b, 0xf2,
-	0x6d, 0xdf, 0x1f, 0xaf, 0xde, 0x2a, 0x2b, 0xc5, 0x02, 0x4a, 0xf1, 0x02, 0xe2, 0x84, 0xc2, 0xf8,
-	0x4e, 0x5b, 0xe5, 0x98, 0x95, 0x62, 0x01, 0xd5, 0xa5, 0x8c, 0x7d, 0x5d, 0xa4, 0x8c, 0xc6, 0x10,
-	0xf5, 0x3f, 0xd8, 0x21, 0x42, 0x9f, 0x06, 0x58, 0x77, 0x3d, 0x37, 0xdc, 0x64, 0xd4, 0x07, 0x0e,
-	0x4c, 0x5d, 0x31, 0xc5, 0xcb, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x60, 0x44, 0x1d, 0x20, 0xe5,
-	0x45, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x11, 0xeb, 0x78, 0xf6, 0x67, 0x93, 0xeb,
-	0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x5e, 0xc7, 0xb7, 0xd0, 0x79, 0x7c, 0xed, 0xbf, 0x1e, 0x86,
-	0x09, 0xa3, 0xb1, 0x76, 0xd8, 0xc3, 0x99, 0x7b, 0x99, 0x5e, 0x40, 0x4e, 0x44, 0xc4, 0xfe, 0xb3,
-	0xbb, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xe1, 0x86, 0x13, 0x32,
-	0x89, 0x25, 0x11, 0xfb, 0xae, 0x17, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, 0xd6, 0xe7, 0xb4,
-	0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, 0xd8, 0x0e, 0xe6,
-	0x30, 0x74, 0x89, 0x1d, 0xad, 0x74, 0x55, 0x2c, 0x50, 0x6e, 0x94, 0x2d, 0xb3, 0x7e, 0x83, 0xc9,
-	0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0x81, 0x0e, 0x6f, 0xb2, 0xa7, 0x60, 0x90, 0xfd, 0x50,
-	0x2b, 0x40, 0xcd, 0x46, 0x99, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0xa1, 0xde, 0x16, 0x0c, 0x7d,
-	0xf5, 0x89, 0x45, 0xcd, 0xcc, 0x2e, 0x86, 0xf8, 0x29, 0x27, 0x96, 0x3c, 0x96, 0x30, 0xf4, 0x6b,
-	0x16, 0x20, 0xa7, 0x41, 0x5f, 0xcb, 0xb4, 0x58, 0x3d, 0x6e, 0x80, 0xb1, 0xda, 0xaf, 0x76, 0x1d,
-	0xf6, 0x76, 0x38, 0x3b, 0x97, 0xaa, 0xcd, 0x25, 0xa5, 0xaf, 0x88, 0x2e, 0xa2, 0x34, 0x82, 0x7e,
-	0x19, 0x5d, 0x73, 0xc3, 0xe8, 0xf3, 0x7f, 0x9f, 0xb8, 0x9c, 0x32, 0xba, 0x84, 0x6e, 0xe8, 0x8f,
-	0xaf, 0x91, 0x03, 0x3e, 0xbe, 0xc6, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, 0x8c, 0xb2, 0x2f,
-	0x7f, 0xa2, 0xcb, 0x03, 0x46, 0x88, 0xd3, 0x7b, 0x79, 0xc6, 0x54, 0x84, 0x1e, 0x78, 0x8c, 0x75,
-	0xb9, 0xf3, 0x23, 0xf8, 0x46, 0x48, 0x82, 0xf9, 0x93, 0x52, 0x4d, 0xbc, 0xaf, 0xf3, 0x1e, 0x9a,
-	0xde, 0xf8, 0x87, 0x2c, 0x98, 0x4e, 0x0f, 0x10, 0xef, 0xd2, 0xf4, 0x38, 0xeb, 0xbf, 0xdd, 0x69,
-	0x64, 0x44, 0xe7, 0xa5, 0xb9, 0xeb, 0xf4, 0x5c, 0x0e, 0x2d, 0x9c, 0xdb, 0x0a, 0xba, 0x04, 0x10,
-	0x46, 0x7e, 0x8b, 0x9f, 0xf5, 0x8c, 0x99, 0x1d, 0x66, 0x06, 0x17, 0x50, 0x55, 0xa5, 0xfb, 0xf1,
-	0x5d, 0xa0, 0xe1, 0xce, 0xb4, 0xe1, 0x44, 0xce, 0x8a, 0xc9, 0x90, 0x77, 0x2f, 0xea, 0xf2, 0xee,
-	0x2e, 0x52, 0xd2, 0x59, 0x39, 0xa7, 0xb3, 0x6f, 0xb5, 0x1d, 0x2f, 0x72, 0xa3, 0x1d, 0x5d, 0x3e,
-	0xee, 0x81, 0x39, 0x94, 0xe8, 0x53, 0xd0, 0xdf, 0x70, 0xbd, 0xf6, 0x5d, 0x71, 0xc7, 0x9e, 0xcb,
-	0x7e, 0xfe, 0x78, 0xed, 0xbb, 0xe6, 0xe4, 0x94, 0xe8, 0x56, 0x66, 0xe5, 0xfb, 0xbb, 0x25, 0x94,
-	0x46, 0xc0, 0x9c, 0xaa, 0xfd, 0x34, 0x8c, 0x2f, 0x3a, 0xa4, 0xe9, 0x7b, 0x4b, 0x5e, 0xbd, 0xe5,
-	0xbb, 0x5e, 0x84, 0xa6, 0xa1, 0x8f, 0x31, 0x97, 0xfc, 0x6a, 0xed, 0xa3, 0x83, 0x8f, 0x59, 0x89,
-	0xbd, 0x01, 0xc7, 0x16, 0xfd, 0x3b, 0xde, 0x1d, 0x27, 0xa8, 0xcf, 0x55, 0xca, 0x9a, 0xbc, 0x70,
-	0x55, 0xca, 0xab, 0xac, 0x7c, 0x69, 0x80, 0x56, 0x93, 0x2f, 0xc2, 0x65, 0xb7, 0x41, 0x72, 0xa4,
-	0xba, 0x3f, 0x5b, 0x30, 0x5a, 0x8a, 0xf1, 0x95, 0x4e, 0xd2, 0xca, 0x35, 0x67, 0x78, 0x0b, 0x86,
-	0xd6, 0x5d, 0xd2, 0xa8, 0x63, 0xb2, 0x2e, 0x66, 0xe3, 0xc9, 0x7c, 0x83, 0xc7, 0x65, 0x8a, 0xa9,
-	0x94, 0xa7, 0x4c, 0xda, 0xb5, 0x2c, 0x2a, 0x63, 0x45, 0x06, 0x6d, 0xc1, 0xa4, 0x9c, 0x33, 0x09,
-	0x15, 0xe7, 0xfd, 0x53, 0x9d, 0x96, 0xaf, 0x49, 0x9c, 0x19, 0x7f, 0xe3, 0x04, 0x19, 0x9c, 0x22,
-	0x8c, 0x4e, 0x41, 0x5f, 0x93, 0x72, 0x36, 0x7d, 0x6c, 0xf8, 0x99, 0x78, 0x8b, 0x49, 0xea, 0x58,
-	0xa9, 0xfd, 0xf3, 0x16, 0x9c, 0x48, 0x8d, 0x8c, 0x90, 0x58, 0x3e, 0xe0, 0x59, 0x48, 0x4a, 0x10,
-	0x0b, 0xdd, 0x25, 0x88, 0xf6, 0x7f, 0x61, 0xc1, 0xd1, 0xa5, 0x66, 0x2b, 0xda, 0x59, 0x74, 0x4d,
-	0xdb, 0x83, 0x97, 0x61, 0xa0, 0x49, 0xea, 0x6e, 0xbb, 0x29, 0x66, 0xae, 0x24, 0x6f, 0xff, 0x15,
-	0x56, 0x4a, 0x4f, 0x90, 0x6a, 0xe4, 0x07, 0xce, 0x06, 0xe1, 0x05, 0x58, 0xa0, 0x33, 0x1e, 0xca,
-	0xbd, 0x47, 0xae, 0xb9, 0x4d, 0x37, 0xba, 0xbf, 0xdd, 0x25, 0xcc, 0x06, 0x24, 0x11, 0x1c, 0xd3,
-	0xb3, 0xbf, 0x69, 0xc1, 0x84, 0x5c, 0xf7, 0x73, 0xf5, 0x7a, 0x40, 0xc2, 0x10, 0xcd, 0x40, 0xc1,
-	0x6d, 0x89, 0x5e, 0x82, 0xe8, 0x65, 0xa1, 0x5c, 0xc1, 0x05, 0xb7, 0x25, 0x9f, 0x6b, 0x8c, 0xc1,
-	0x28, 0x9a, 0x16, 0x14, 0x57, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x3c, 0x0c, 0x79, 0x7e, 0x9d, 0xbf,
-	0x78, 0x84, 0x0e, 0x9d, 0x62, 0xae, 0x8a, 0x32, 0xac, 0xa0, 0xa8, 0x02, 0xc3, 0xdc, 0xbe, 0x36,
-	0x5e, 0xb4, 0x3d, 0x59, 0xe9, 0xb2, 0x2f, 0x5b, 0x93, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0x53, 0x0b,
-	0x46, 0xe5, 0x97, 0xf5, 0xf8, 0x16, 0xa5, 0x5b, 0x2b, 0x7e, 0x87, 0xc6, 0x5b, 0x8b, 0xbe, 0x25,
-	0x19, 0xc4, 0x78, 0x42, 0x16, 0x0f, 0xf4, 0x84, 0xbc, 0x08, 0x23, 0x4e, 0xab, 0x55, 0x31, 0xdf,
-	0x9f, 0x6c, 0x29, 0xcd, 0xc5, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xae, 0x00, 0xe3, 0xf2, 0x0b, 0xaa,
-	0xed, 0xdb, 0x21, 0x89, 0xd0, 0x1a, 0x0c, 0x3b, 0x7c, 0x96, 0x88, 0x5c, 0xe4, 0x8f, 0x65, 0xcb,
-	0x45, 0x8d, 0x29, 0x8d, 0x19, 0xe9, 0x39, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x03, 0xa6, 0x3c, 0x3f,
-	0x62, 0x4c, 0x95, 0x82, 0x77, 0x52, 0x55, 0x27, 0xa9, 0x9f, 0x14, 0xd4, 0xa7, 0x56, 0x93, 0x54,
-	0x70, 0x9a, 0x30, 0x5a, 0x92, 0xb2, 0xe6, 0x62, 0xbe, 0x90, 0x50, 0x9f, 0xb8, 0x6c, 0x51, 0xb3,
-	0xfd, 0x47, 0x16, 0x0c, 0x4b, 0xb4, 0xc3, 0xb0, 0x4a, 0x58, 0x81, 0xc1, 0x90, 0x4d, 0x82, 0x1c,
-	0x1a, 0xbb, 0x53, 0xc7, 0xf9, 0x7c, 0xc5, 0xbc, 0x22, 0xff, 0x1f, 0x62, 0x49, 0x83, 0xa9, 0x1a,
-	0x55, 0xf7, 0xdf, 0x27, 0xaa, 0x46, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x1f, 0x58, 0x9f, 0x35, 0xd9,
-	0x3d, 0x7d, 0xd2, 0xb4, 0x02, 0xb2, 0xee, 0xde, 0x4d, 0x3e, 0x69, 0x2a, 0xac, 0x14, 0x0b, 0x28,
-	0x7a, 0x07, 0x46, 0x6b, 0x52, 0xc7, 0x14, 0xef, 0xf0, 0x73, 0x1d, 0xf5, 0x9d, 0x4a, 0x35, 0xce,
-	0x65, 0xa4, 0x0b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xed, 0xc7, 0x8a, 0xdd, 0xec, 0xc7, 0x62, 0xba,
-	0xf9, 0xd6, 0x54, 0xbf, 0x60, 0xc1, 0x00, 0xd7, 0x2d, 0xf4, 0xa6, 0xda, 0xd1, 0x2c, 0x05, 0xe2,
-	0xb1, 0xbb, 0x49, 0x0b, 0x05, 0x67, 0x83, 0x56, 0x60, 0x98, 0xfd, 0x60, 0xba, 0x91, 0x62, 0xbe,
-	0xb7, 0x19, 0x6f, 0x55, 0xef, 0xe0, 0x4d, 0x59, 0x0d, 0xc7, 0x14, 0xec, 0x9f, 0x2e, 0xd2, 0xd3,
-	0x2d, 0x46, 0x35, 0x2e, 0x7d, 0xeb, 0xe1, 0x5d, 0xfa, 0x85, 0x87, 0x75, 0xe9, 0x6f, 0xc0, 0x44,
-	0x4d, 0xb3, 0x2b, 0x88, 0x67, 0xf2, 0x7c, 0xc7, 0x45, 0xa2, 0x99, 0x20, 0x70, 0xe9, 0xeb, 0x82,
-	0x49, 0x04, 0x27, 0xa9, 0xa2, 0x4f, 0xc2, 0x28, 0x9f, 0x67, 0xd1, 0x0a, 0x37, 0xc1, 0x7b, 0x22,
-	0x7f, 0xbd, 0xe8, 0x4d, 0x70, 0x69, 0xbd, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0x7f, 0xb1, 0x00, 0x2d,
-	0xb5, 0x36, 0x49, 0x93, 0x04, 0x4e, 0x23, 0x56, 0x0f, 0x7e, 0xc9, 0x82, 0x69, 0x92, 0x2a, 0x5e,
-	0xf0, 0x9b, 0x4d, 0x21, 0x0c, 0xc8, 0x91, 0x57, 0x2d, 0xe5, 0xd4, 0x89, 0x1f, 0x04, 0x79, 0x18,
-	0x38, 0xb7, 0x3d, 0xb4, 0x02, 0x47, 0xf8, 0x2d, 0xa9, 0x00, 0x9a, 0x95, 0xde, 0x23, 0x82, 0xf0,
-	0x91, 0xb5, 0x34, 0x0a, 0xce, 0xaa, 0x67, 0xff, 0xd1, 0x18, 0xe4, 0xf6, 0xe2, 0x03, 0xbd, 0xe8,
-	0x07, 0x7a, 0xd1, 0x0f, 0xf4, 0xa2, 0x1f, 0xe8, 0x45, 0x3f, 0xd0, 0x8b, 0x7e, 0xa0, 0x17, 0x7d,
-	0x9f, 0xea, 0x45, 0x7f, 0xc6, 0x82, 0x63, 0xea, 0xfa, 0x32, 0x1e, 0xec, 0x9f, 0x83, 0x23, 0x7c,
-	0xbb, 0x2d, 0x34, 0x1c, 0xb7, 0xb9, 0x46, 0x9a, 0xad, 0x86, 0x13, 0x49, 0xeb, 0xa7, 0x8b, 0x99,
-	0x2b, 0x37, 0xe1, 0x62, 0x61, 0x54, 0xe4, 0xbe, 0x6a, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0xfb,
-	0x43, 0xd0, 0xbf, 0xb4, 0x4d, 0xbc, 0xe8, 0x10, 0x9e, 0x36, 0x35, 0x18, 0x77, 0xbd, 0x6d, 0xbf,
-	0xb1, 0x4d, 0xea, 0x1c, 0x7e, 0x90, 0x17, 0xf8, 0x71, 0x41, 0x7a, 0xbc, 0x6c, 0x90, 0xc0, 0x09,
-	0x92, 0x0f, 0x43, 0xbb, 0x74, 0x19, 0x06, 0xf8, 0xe5, 0x23, 0x54, 0x4b, 0x99, 0x67, 0x36, 0x1b,
-	0x44, 0x71, 0xa5, 0xc6, 0x9a, 0x2f, 0x7e, 0xb9, 0x89, 0xea, 0xe8, 0xb3, 0x30, 0xbe, 0xee, 0x06,
-	0x61, 0xb4, 0xe6, 0x36, 0xe9, 0xd5, 0xd0, 0x6c, 0xdd, 0x87, 0x36, 0x49, 0x8d, 0xc3, 0xb2, 0x41,
-	0x09, 0x27, 0x28, 0xa3, 0x0d, 0x18, 0x6b, 0x38, 0x7a, 0x53, 0x83, 0x07, 0x6e, 0x4a, 0xdd, 0x0e,
-	0xd7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0xb7, 0x53, 0x8d, 0x29, 0x44, 0x86, 0x98, 0x38, 0x43, 0x6d,
-	0x27, 0xae, 0x09, 0xe1, 0x30, 0xca, 0xa0, 0x31, 0x47, 0x85, 0x61, 0x93, 0x41, 0xd3, 0xdc, 0x11,
-	0x3e, 0x03, 0xc3, 0x84, 0x0e, 0x21, 0x25, 0x2c, 0x2e, 0x98, 0x0b, 0xbd, 0xf5, 0x75, 0xc5, 0xad,
-	0x05, 0xbe, 0xa9, 0xc7, 0x5b, 0x92, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57,
-	0xe9, 0x0a, 0x3a, 0x4c, 0x23, 0x43, 0xe3, 0xce, 0x90, 0xfc, 0x37, 0x16, 0x55, 0xe9, 0xf2, 0x72,
-	0x98, 0x28, 0x96, 0x5d, 0x06, 0xda, 0xf2, 0x9a, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x9b, 0x30, 0x18,
-	0x90, 0x06, 0x53, 0x14, 0x8f, 0xf5, 0xbe, 0xc8, 0xb9, 0xde, 0x99, 0xd7, 0xc3, 0x92, 0x00, 0xba,
-	0x0a, 0x28, 0x20, 0x94, 0xc1, 0x73, 0xbd, 0x0d, 0x65, 0xbe, 0x2f, 0x0e, 0x5a, 0xc5, 0x48, 0xe3,
-	0x18, 0x43, 0xfa, 0xc1, 0xe2, 0x8c, 0x6a, 0xe8, 0x32, 0x4c, 0xa9, 0xd2, 0xb2, 0x17, 0x46, 0x0e,
-	0x3d, 0xe0, 0xb8, 0xb8, 0x5e, 0xc9, 0x57, 0x70, 0x12, 0x01, 0xa7, 0xeb, 0xd8, 0xbf, 0x61, 0x01,
-	0x1f, 0xe7, 0x43, 0x90, 0x2a, 0xbc, 0x6e, 0x4a, 0x15, 0x4e, 0xe6, 0xce, 0x5c, 0x8e, 0x44, 0xe1,
-	0x37, 0x2c, 0x18, 0xd1, 0x66, 0x36, 0x5e, 0xb3, 0x56, 0x87, 0x35, 0xdb, 0x86, 0x49, 0xba, 0xd2,
-	0xaf, 0xdf, 0x0e, 0x49, 0xb0, 0x4d, 0xea, 0x6c, 0x61, 0x16, 0xee, 0x6f, 0x61, 0x2a, 0x53, 0xe1,
-	0x6b, 0x09, 0x82, 0x38, 0xd5, 0x84, 0xfd, 0x19, 0xd9, 0x55, 0x65, 0x59, 0x5d, 0x53, 0x73, 0x9e,
-	0xb0, 0xac, 0x56, 0xb3, 0x8a, 0x63, 0x1c, 0xba, 0xd5, 0x36, 0xfd, 0x30, 0x4a, 0x5a, 0x56, 0x5f,
-	0xf1, 0xc3, 0x08, 0x33, 0x88, 0xfd, 0x02, 0xc0, 0xd2, 0x5d, 0x52, 0xe3, 0x2b, 0x56, 0x7f, 0xf4,
-	0x58, 0xf9, 0x8f, 0x1e, 0xfb, 0x6f, 0x2c, 0x18, 0x5f, 0x5e, 0x30, 0x6e, 0xae, 0x59, 0x00, 0xfe,
-	0x52, 0xbb, 0x75, 0x6b, 0x55, 0x9a, 0xf7, 0x70, 0x0b, 0x07, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x12,
-	0x8a, 0x8d, 0xb6, 0x27, 0xc4, 0x9e, 0x83, 0xf4, 0x7a, 0xbc, 0xd6, 0xf6, 0x30, 0x2d, 0xd3, 0x7c,
-	0xe0, 0x8a, 0x3d, 0xfb, 0xc0, 0x75, 0x0d, 0xc5, 0x83, 0x4a, 0xd0, 0x7f, 0xe7, 0x8e, 0x5b, 0xe7,
-	0x11, 0x06, 0x84, 0xe9, 0xd1, 0xad, 0x5b, 0xe5, 0xc5, 0x10, 0xf3, 0x72, 0xfb, 0xcb, 0x45, 0x98,
-	0x59, 0x6e, 0x90, 0xbb, 0xef, 0x31, 0xca, 0x42, 0xaf, 0x1e, 0x7c, 0x07, 0x13, 0x20, 0x1d, 0xd4,
-	0x4b, 0xb3, 0xfb, 0x78, 0xac, 0xc3, 0x20, 0x37, 0x2c, 0x96, 0x31, 0x17, 0x32, 0xd5, 0xb9, 0xf9,
-	0x03, 0x32, 0xcb, 0x0d, 0x94, 0x85, 0x3a, 0x57, 0x5d, 0x98, 0xa2, 0x14, 0x4b, 0xe2, 0x33, 0xaf,
-	0xc0, 0xa8, 0x8e, 0x79, 0x20, 0x7f, 0xe9, 0x1f, 0x2e, 0xc2, 0x24, 0xed, 0xc1, 0x43, 0x9d, 0x88,
-	0x1b, 0xe9, 0x89, 0x78, 0xd0, 0x3e, 0xb3, 0xdd, 0x67, 0xe3, 0x9d, 0xe4, 0x6c, 0x5c, 0xcc, 0x9b,
-	0x8d, 0xc3, 0x9e, 0x83, 0x1f, 0xb1, 0xe0, 0xc8, 0x72, 0xc3, 0xaf, 0x6d, 0x25, 0xfc, 0x5a, 0x5f,
-	0x82, 0x11, 0x7a, 0x1c, 0x87, 0x46, 0x88, 0x17, 0x23, 0xe8, 0x8f, 0x00, 0x61, 0x1d, 0x4f, 0xab,
-	0x76, 0xe3, 0x46, 0x79, 0x31, 0x2b, 0x56, 0x90, 0x00, 0x61, 0x1d, 0xcf, 0xfe, 0x4b, 0x0b, 0x4e,
-	0x5f, 0x5e, 0x58, 0x8a, 0x97, 0x62, 0x2a, 0x5c, 0xd1, 0x39, 0x18, 0x68, 0xd5, 0xb5, 0xae, 0xc4,
-	0x62, 0xe1, 0x45, 0xd6, 0x0b, 0x01, 0x7d, 0xbf, 0x44, 0x06, 0xbb, 0x01, 0x70, 0x19, 0x57, 0x16,
-	0xc4, 0xb9, 0x2b, 0xb5, 0x40, 0x56, 0xae, 0x16, 0xe8, 0x09, 0x18, 0xa4, 0xf7, 0x82, 0x5b, 0x93,
-	0xfd, 0xe6, 0x06, 0x1b, 0xbc, 0x08, 0x4b, 0x98, 0xfd, 0xeb, 0x16, 0x1c, 0xb9, 0xec, 0x46, 0xf4,
-	0xd2, 0x4e, 0xc6, 0xe3, 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x93, 0x8c, 0xc7, 0x83, 0x15,
-	0x04, 0x6b, 0x58, 0xfc, 0x83, 0xb6, 0x5d, 0xe6, 0x29, 0x53, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63,
-	0x85, 0x41, 0xc7, 0xab, 0xee, 0x06, 0x4c, 0x64, 0xb9, 0x23, 0x0e, 0x6e, 0x35, 0x5e, 0x8b, 0x12,
-	0x80, 0x63, 0x1c, 0xfb, 0x9f, 0x2c, 0x28, 0x5d, 0xe6, 0xfe, 0xbe, 0xeb, 0x61, 0xce, 0xa1, 0xfb,
-	0x02, 0x0c, 0x13, 0xa9, 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xb0, 0x40, 0x0a,
-	0xaf, 0x07, 0xe7, 0xfb, 0x83, 0x79, 0x4f, 0x2f, 0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x9c, 0x24, 0x16,
-	0x70, 0x65, 0x29, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb7, 0xe0, 0x98, 0xfa, 0xe0, 0xf7, 0xdd,
-	0x67, 0xda, 0xbf, 0x53, 0x80, 0xb1, 0x2b, 0x6b, 0x6b, 0x95, 0xcb, 0x24, 0xd2, 0x56, 0x65, 0x67,
-	0xb5, 0x3f, 0xd6, 0xb4, 0x97, 0x9d, 0xde, 0x88, 0xed, 0xc8, 0x6d, 0xcc, 0xf2, 0xe8, 0x7f, 0xb3,
-	0x65, 0x2f, 0xba, 0x1e, 0x54, 0xa3, 0xc0, 0xf5, 0x36, 0x32, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79,
-	0x3c, 0x0b, 0x7a, 0x01, 0x06, 0x58, 0xf8, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0xdd,
-	0xdf, 0x2d, 0x0d, 0xdf, 0xc0, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x03, 0x46, 0x36, 0xa3, 0xa8,
-	0x75, 0x85, 0x38, 0x75, 0x12, 0xc8, 0x53, 0xf6, 0x4c, 0xd6, 0x29, 0x4b, 0x07, 0x81, 0xa3, 0xc5,
-	0x07, 0x53, 0x5c, 0x16, 0x62, 0x9d, 0x8e, 0x5d, 0x05, 0x88, 0x61, 0x0f, 0x48, 0x71, 0x63, 0xaf,
-	0xc1, 0x30, 0xfd, 0xdc, 0xb9, 0x86, 0xeb, 0x74, 0x56, 0x8d, 0x3f, 0x03, 0xc3, 0x52, 0xf1, 0x1d,
-	0x8a, 0xe0, 0x20, 0xec, 0x46, 0x92, 0x7a, 0xf1, 0x10, 0xc7, 0x70, 0xfb, 0x71, 0x10, 0xb6, 0xc3,
-	0x9d, 0x48, 0xda, 0xeb, 0x70, 0x94, 0x19, 0x41, 0x3b, 0xd1, 0xa6, 0xb1, 0x46, 0xbb, 0x2f, 0x86,
-	0x67, 0xc5, 0xbb, 0xae, 0xa0, 0xec, 0x7d, 0xa4, 0xf3, 0xf9, 0xa8, 0xa4, 0x18, 0xbf, 0xf1, 0xec,
-	0x7f, 0xec, 0x83, 0x47, 0xca, 0xd5, 0xfc, 0xa8, 0x56, 0x97, 0x60, 0x94, 0xb3, 0x8b, 0x74, 0x69,
-	0x38, 0x0d, 0xd1, 0xae, 0x92, 0x80, 0xae, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0xd3, 0x50, 0x74, 0xdf,
-	0xf5, 0x92, 0xae, 0x99, 0xe5, 0xb7, 0x56, 0x31, 0x2d, 0xa7, 0x60, 0xca, 0x79, 0xf2, 0x23, 0x5d,
-	0x81, 0x15, 0xf7, 0xf9, 0x3a, 0x8c, 0xbb, 0x61, 0x2d, 0x74, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e,
-	0x57, 0x32, 0x07, 0xda, 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xef, 0x99, 0x7b, 0xed,
-	0x1a, 0x53, 0x83, 0x1e, 0xff, 0x2d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70,
-	0x88, 0x25, 0x8c, 0x3e, 0xe8, 0x6a, 0x9b, 0x4e, 0x6b, 0xae, 0x1d, 0x6d, 0x2e, 0xba, 0x61, 0xcd,
-	0xdf, 0x26, 0xc1, 0x0e, 0x7b, 0x8b, 0x0f, 0xc5, 0x0f, 0x3a, 0x05, 0x58, 0xb8, 0x32, 0x57, 0xa1,
-	0x98, 0x38, 0x5d, 0x07, 0xcd, 0xc1, 0x84, 0x2c, 0xac, 0x92, 0x90, 0x5d, 0x01, 0x23, 0x8c, 0x8c,
-	0x72, 0x96, 0x14, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3,
-	0x98, 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x97, 0x75,
-	0x00, 0x36, 0xf1, 0xec, 0xff, 0xb3, 0x0f, 0xa6, 0xd8, 0xb4, 0x7d, 0xb0, 0xc2, 0xbe, 0x97, 0x56,
-	0xd8, 0x8d, 0xf4, 0x0a, 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc5, 0x64,
-	0xdc, 0xc6, 0x32, 0xbb, 0x00, 0xc3, 0x81, 0xe1, 0xc7, 0x3a, 0xac, 0x2b, 0xb5, 0xa4, 0x4b, 0x6a,
-	0x8c, 0x83, 0xde, 0x00, 0x68, 0xc5, 0x32, 0xf4, 0x82, 0x11, 0x7c, 0x14, 0x72, 0xc5, 0xe7, 0x5a,
-	0x1d, 0xfb, 0xb3, 0x30, 0xac, 0x1c, 0x55, 0xa5, 0xa7, 0xba, 0x95, 0xe3, 0xa9, 0xde, 0x9d, 0x8d,
-	0x90, 0xb6, 0x71, 0xc5, 0x4c, 0xdb, 0xb8, 0xff, 0xcb, 0x82, 0x58, 0xc3, 0x81, 0xde, 0x82, 0xe1,
-	0x96, 0xcf, 0x4c, 0xa9, 0x03, 0xe9, 0x9f, 0xf0, 0x78, 0x47, 0x15, 0x09, 0x8f, 0x30, 0x18, 0xf0,
-	0xe9, 0xa8, 0xc8, 0xaa, 0x38, 0xa6, 0x82, 0xae, 0xc2, 0x60, 0x2b, 0x20, 0xd5, 0x88, 0x85, 0xbf,
-	0xea, 0x9d, 0x20, 0x5f, 0xbe, 0xbc, 0x22, 0x96, 0x14, 0x12, 0x96, 0xa9, 0xc5, 0xde, 0x2d, 0x53,
-	0xed, 0xdf, 0x2a, 0xc0, 0x64, 0xb2, 0x11, 0xf4, 0x1a, 0xf4, 0x91, 0xbb, 0xa4, 0x26, 0xbe, 0x34,
-	0x93, 0x9b, 0x88, 0xa5, 0x2b, 0x7c, 0xe8, 0xe8, 0x7f, 0xcc, 0x6a, 0xa1, 0x2b, 0x30, 0x48, 0x59,
-	0x89, 0xcb, 0x2a, 0x48, 0xe4, 0xa3, 0x79, 0xec, 0x88, 0xe2, 0xc9, 0xf8, 0x67, 0x89, 0x22, 0x2c,
-	0xab, 0x33, 0x53, 0xb6, 0x5a, 0xab, 0x4a, 0x5f, 0x69, 0x51, 0x27, 0x61, 0xc2, 0xda, 0x42, 0x85,
-	0x23, 0x09, 0x6a, 0xdc, 0x94, 0x4d, 0x16, 0xe2, 0x98, 0x08, 0x7a, 0x03, 0xfa, 0xc3, 0x06, 0x21,
-	0x2d, 0x61, 0xab, 0x90, 0x29, 0x1f, 0xad, 0x52, 0x04, 0x41, 0x89, 0xc9, 0x53, 0x58, 0x01, 0xe6,
-	0x15, 0xed, 0xdf, 0xb5, 0x00, 0xb8, 0xed, 0x9f, 0xe3, 0x6d, 0x90, 0x43, 0x50, 0x29, 0x2c, 0x42,
-	0x5f, 0xd8, 0x22, 0xb5, 0x4e, 0x1e, 0x06, 0x71, 0x7f, 0xaa, 0x2d, 0x52, 0x8b, 0x57, 0x3b, 0xfd,
-	0x87, 0x59, 0x6d, 0xfb, 0x47, 0x01, 0xc6, 0x63, 0xb4, 0x72, 0x44, 0x9a, 0xe8, 0x39, 0x23, 0xb2,
-	0xce, 0xc9, 0x44, 0x64, 0x9d, 0x61, 0x86, 0xad, 0x49, 0xaf, 0x3f, 0x0b, 0xc5, 0xa6, 0x73, 0x57,
-	0x88, 0x27, 0x9f, 0xe9, 0xdc, 0x0d, 0x4a, 0x7f, 0x76, 0xc5, 0xb9, 0xcb, 0x5f, 0xf0, 0xcf, 0xc8,
-	0xdd, 0xb9, 0xe2, 0xdc, 0xed, 0x6a, 0x05, 0x4f, 0x1b, 0x61, 0x6d, 0xb9, 0x9e, 0x30, 0x6b, 0xeb,
-	0xa9, 0x2d, 0xd7, 0x4b, 0xb6, 0xe5, 0x7a, 0x3d, 0xb4, 0xe5, 0x7a, 0xe8, 0x1e, 0x0c, 0x0a, 0xab,
-	0x53, 0x11, 0xf2, 0xef, 0x42, 0x0f, 0xed, 0x09, 0xa3, 0x55, 0xde, 0xe6, 0x05, 0x29, 0xa1, 0x10,
-	0xa5, 0x5d, 0xdb, 0x95, 0x0d, 0xa2, 0xff, 0xd4, 0x82, 0x71, 0xf1, 0x1b, 0x93, 0x77, 0xdb, 0x24,
-	0x8c, 0x04, 0x07, 0xff, 0x91, 0xde, 0xfb, 0x20, 0x2a, 0xf2, 0xae, 0x7c, 0x44, 0x5e, 0xb6, 0x26,
-	0xb0, 0x6b, 0x8f, 0x12, 0xbd, 0x40, 0xbf, 0x65, 0xc1, 0xd1, 0xa6, 0x73, 0x97, 0xb7, 0xc8, 0xcb,
-	0xb0, 0x13, 0xb9, 0xbe, 0xb0, 0xde, 0x78, 0xad, 0xb7, 0xe9, 0x4f, 0x55, 0xe7, 0x9d, 0x94, 0xaa,
-	0xda, 0xa3, 0x59, 0x28, 0x5d, 0xbb, 0x9a, 0xd9, 0xaf, 0x99, 0x75, 0x18, 0x92, 0xeb, 0xed, 0x61,
-	0x9a, 0xd4, 0xb3, 0x76, 0xc4, 0x5a, 0x7b, 0xa8, 0xed, 0x7c, 0x16, 0x46, 0xf5, 0x35, 0xf6, 0x50,
-	0xdb, 0x7a, 0x17, 0x8e, 0x64, 0xac, 0xa5, 0x87, 0xda, 0xe4, 0x1d, 0x38, 0x99, 0xbb, 0x3e, 0x1e,
-	0xaa, 0x4b, 0xc4, 0xef, 0x58, 0xfa, 0x39, 0x78, 0x08, 0x7a, 0x9d, 0x05, 0x53, 0xaf, 0x73, 0xa6,
-	0xf3, 0xce, 0xc9, 0x51, 0xee, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x68, 0xd0,
-	0x12, 0x69, 0xbb, 0x6c, 0x77, 0xdf, 0x91, 0x31, 0x47, 0xcd, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0x5f,
-	0xb1, 0x20, 0xc3, 0xa9, 0x83, 0x72, 0x58, 0x6d, 0xb7, 0xce, 0x86, 0xa4, 0x18, 0x73, 0x58, 0x2a,
-	0xf0, 0xcc, 0x69, 0x28, 0x6e, 0xb8, 0x75, 0xe1, 0xcd, 0xac, 0xc0, 0x97, 0x29, 0x78, 0xc3, 0xad,
-	0xa3, 0x65, 0x40, 0x61, 0xbb, 0xd5, 0x6a, 0x30, 0x83, 0x27, 0xa7, 0x71, 0x39, 0xf0, 0xdb, 0x2d,
-	0x6e, 0xa8, 0x5c, 0xe4, 0xe2, 0xa5, 0x6a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, 0x7f, 0x60, 0x41, 0xdf,
-	0x21, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x53, 0xc4, 0x2c, 0x76, 0xee, 0x2c,
-	0xdd, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0x76, 0x2d, 0x38, 0x72, 0xcd, 0x77, 0xea,
-	0xf3, 0x4e, 0xc3, 0xf1, 0x6a, 0x24, 0x28, 0x7b, 0x1b, 0x07, 0xf2, 0x0a, 0x28, 0x74, 0xf5, 0x0a,
-	0xb8, 0x04, 0x03, 0x6e, 0x4b, 0x0b, 0x35, 0x7f, 0x96, 0xce, 0x6e, 0xb9, 0x22, 0xa2, 0xcc, 0x23,
-	0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x1c, 0xaf, 0x2f, 0x7f, 0x59, 0xd2, 0x57,
-	0x52, 0x32, 0x84, 0x9a, 0x61, 0x38, 0xbe, 0x09, 0x46, 0x13, 0xc2, 0x4d, 0x0a, 0xc3, 0xa0, 0xcb,
-	0xbf, 0x54, 0xac, 0xcd, 0x27, 0xb3, 0x5f, 0x2f, 0xa9, 0x81, 0xd1, 0xfc, 0x01, 0x79, 0x01, 0x96,
-	0x84, 0xec, 0x4b, 0x90, 0x19, 0xf2, 0xa6, 0xbb, 0x64, 0xca, 0xfe, 0x04, 0x4c, 0xb1, 0x9a, 0x07,
-	0x94, 0xfa, 0xd8, 0x09, 0x79, 0x7a, 0x46, 0xd4, 0x60, 0xfb, 0x7f, 0xb5, 0x00, 0xad, 0xf8, 0x75,
-	0x77, 0x7d, 0x47, 0x10, 0xe7, 0xdf, 0xff, 0x2e, 0x94, 0xf8, 0xb3, 0x3a, 0x19, 0x59, 0x77, 0xa1,
-	0xe1, 0x84, 0xa1, 0x26, 0xcb, 0x7f, 0x52, 0xb4, 0x5b, 0x5a, 0xeb, 0x8c, 0x8e, 0xbb, 0xd1, 0x43,
-	0x6f, 0x25, 0x02, 0x1d, 0x7e, 0x34, 0x15, 0xe8, 0xf0, 0xc9, 0x4c, 0x8b, 0x9a, 0x74, 0xef, 0x65,
-	0x00, 0x44, 0xfb, 0x8b, 0x16, 0x4c, 0xac, 0x26, 0x22, 0xc5, 0x9e, 0x63, 0xe6, 0x05, 0x19, 0x3a,
-	0xaa, 0x2a, 0x2b, 0xc5, 0x02, 0xfa, 0xc0, 0x65, 0xb8, 0xdf, 0xb6, 0x20, 0x0e, 0xb1, 0x75, 0x08,
-	0x2c, 0xf7, 0x82, 0xc1, 0x72, 0x67, 0x3e, 0x5f, 0x54, 0x77, 0xf2, 0x38, 0x6e, 0x74, 0x55, 0xcd,
-	0x49, 0x87, 0x97, 0x4b, 0x4c, 0x86, 0xef, 0xb3, 0x71, 0x73, 0xe2, 0xd4, 0x6c, 0x7c, 0xa3, 0x00,
-	0x48, 0xe1, 0xf6, 0x1c, 0x1c, 0x33, 0x5d, 0xe3, 0xc1, 0x04, 0xc7, 0xdc, 0x06, 0xc4, 0x0c, 0x64,
-	0x02, 0xc7, 0x0b, 0x39, 0x59, 0x57, 0x48, 0xad, 0x0f, 0x66, 0x7d, 0x33, 0x23, 0xbd, 0x65, 0xaf,
-	0xa5, 0xa8, 0xe1, 0x8c, 0x16, 0x34, 0xc3, 0xa7, 0xfe, 0x5e, 0x0d, 0x9f, 0x06, 0xba, 0xb8, 0x7d,
-	0x7f, 0xcd, 0x82, 0x31, 0x35, 0x4c, 0xef, 0x13, 0xe7, 0x11, 0xd5, 0x9f, 0x9c, 0x7b, 0xa5, 0xa2,
-	0x75, 0x99, 0x31, 0x03, 0xdf, 0xc7, 0xdc, 0xf7, 0x9d, 0x86, 0x7b, 0x8f, 0xa8, 0x18, 0xce, 0x25,
-	0xe1, 0x8e, 0x2f, 0x4a, 0xf7, 0x77, 0x4b, 0x63, 0xea, 0x1f, 0x8f, 0x1a, 0x1b, 0x57, 0xb1, 0x7f,
-	0x99, 0x6e, 0x76, 0x73, 0x29, 0xa2, 0x97, 0xa0, 0xbf, 0xb5, 0xe9, 0x84, 0x24, 0xe1, 0x64, 0xd7,
-	0x5f, 0xa1, 0x85, 0xfb, 0xbb, 0xa5, 0x71, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7b, 0xc8, 0xd1,
-	0xf4, 0xe2, 0xec, 0x1a, 0x72, 0xf4, 0x5f, 0x2c, 0xe8, 0x5b, 0xa5, 0xb7, 0xd7, 0xc3, 0x3f, 0x02,
-	0x5e, 0x37, 0x8e, 0x80, 0x53, 0x79, 0xd9, 0x8c, 0x72, 0x77, 0xff, 0x72, 0x62, 0xf7, 0x9f, 0xc9,
-	0xa5, 0xd0, 0x79, 0xe3, 0x37, 0x61, 0x84, 0xe5, 0x48, 0x12, 0x0e, 0x85, 0x2f, 0x18, 0x1b, 0xbe,
-	0x94, 0xd8, 0xf0, 0x13, 0x1a, 0xaa, 0xb6, 0xd3, 0x9f, 0x82, 0x41, 0xe1, 0xa1, 0x96, 0x8c, 0x82,
-	0x20, 0x70, 0xb1, 0x84, 0xdb, 0xbf, 0x50, 0x04, 0x23, 0x27, 0x13, 0xfa, 0x23, 0x0b, 0x66, 0x03,
-	0x6e, 0xb9, 0x5e, 0x5f, 0x6c, 0x07, 0xae, 0xb7, 0x51, 0xad, 0x6d, 0x92, 0x7a, 0xbb, 0xe1, 0x7a,
-	0x1b, 0xe5, 0x0d, 0xcf, 0x57, 0xc5, 0x4b, 0x77, 0x49, 0xad, 0xcd, 0xb4, 0xca, 0x5d, 0x12, 0x40,
-	0x29, 0x0f, 0x90, 0xe7, 0xf7, 0x76, 0x4b, 0xb3, 0xf8, 0x40, 0xb4, 0xf1, 0x01, 0xfb, 0x82, 0xfe,
-	0xd2, 0x82, 0x0b, 0x3c, 0x37, 0x50, 0xef, 0xfd, 0xef, 0x20, 0xe1, 0xa8, 0x48, 0x52, 0x31, 0x91,
-	0x35, 0x12, 0x34, 0xe7, 0x5f, 0x16, 0x03, 0x7a, 0xa1, 0x72, 0xb0, 0xb6, 0xf0, 0x41, 0x3b, 0x67,
-	0xff, 0xb7, 0x45, 0x18, 0x13, 0xa1, 0x29, 0xc5, 0x1d, 0xf0, 0x92, 0xb1, 0x24, 0x1e, 0x4d, 0x2c,
-	0x89, 0x29, 0x03, 0xf9, 0xc1, 0x1c, 0xff, 0x21, 0x4c, 0xd1, 0xc3, 0xf9, 0x0a, 0x71, 0x82, 0xe8,
-	0x36, 0x71, 0xb8, 0x3d, 0x63, 0xf1, 0xc0, 0xa7, 0xbf, 0x12, 0xac, 0x5f, 0x4b, 0x12, 0xc3, 0x69,
-	0xfa, 0xdf, 0x4b, 0x77, 0x8e, 0x07, 0x93, 0xa9, 0xe8, 0xa2, 0x6f, 0xc3, 0xb0, 0x72, 0xaf, 0x12,
-	0x87, 0x4e, 0xe7, 0x20, 0xbd, 0x49, 0x0a, 0x5c, 0xe8, 0x19, 0xbb, 0xf6, 0xc5, 0xe4, 0xec, 0xdf,
-	0x2e, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x85, 0x21, 0x27, 0x64, 0x81, 0xc3, 0xeb, 0x9d, 0x24, 0xda,
-	0xa9, 0x66, 0x98, 0x8b, 0xdb, 0x9c, 0xa8, 0x89, 0x15, 0x0d, 0x74, 0x85, 0x5b, 0x8d, 0x6e, 0x93,
-	0x4e, 0xe2, 0xec, 0x14, 0x35, 0x90, 0x76, 0xa5, 0xdb, 0x04, 0x8b, 0xfa, 0xe8, 0x53, 0xdc, 0xac,
-	0xf7, 0xaa, 0xe7, 0xdf, 0xf1, 0x2e, 0xfb, 0xbe, 0x0c, 0x43, 0xd4, 0x1b, 0xc1, 0x29, 0x69, 0xcc,
-	0xab, 0xaa, 0x63, 0x93, 0x5a, 0x6f, 0xe1, 0xba, 0x3f, 0x07, 0x2c, 0x17, 0x8a, 0x19, 0xcd, 0x20,
-	0x44, 0x04, 0x26, 0x44, 0xdc, 0x53, 0x59, 0x26, 0xc6, 0x2e, 0xf3, 0xf9, 0x6d, 0xd6, 0x8e, 0x35,
-	0x40, 0x57, 0x4d, 0x12, 0x38, 0x49, 0xd3, 0xde, 0xe4, 0x87, 0xf0, 0x32, 0x71, 0xa2, 0x76, 0x40,
-	0x42, 0xf4, 0x71, 0x98, 0x4e, 0xbf, 0x8c, 0x85, 0x22, 0xc5, 0x62, 0xdc, 0xf3, 0xa9, 0xbd, 0xdd,
-	0xd2, 0x74, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0xb6, 0x7f, 0xcd, 0x02, 0xe6, 0x43, 0x7e, 0x08, 0x9c,
-	0xcf, 0xc7, 0x4c, 0xce, 0x67, 0x3a, 0x6f, 0x3a, 0x73, 0x98, 0x9e, 0x17, 0xf9, 0x1a, 0xae, 0x04,
-	0xfe, 0xdd, 0x1d, 0x61, 0xf5, 0xd5, 0xfd, 0x19, 0x67, 0x7f, 0xd9, 0x02, 0x96, 0x38, 0x08, 0xf3,
-	0x57, 0xbb, 0x54, 0x70, 0x74, 0x37, 0x68, 0xf8, 0x38, 0x0c, 0xad, 0x8b, 0xe1, 0xcf, 0x10, 0x3a,
-	0x19, 0x1d, 0x36, 0x69, 0xcb, 0x49, 0x13, 0xbe, 0xa0, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
-	0x82, 0x99, 0xfc, 0x6a, 0xe8, 0x06, 0x9c, 0x08, 0x48, 0xad, 0x1d, 0x84, 0x74, 0x4b, 0x88, 0x07,
-	0x90, 0x70, 0xa7, 0xe2, 0x53, 0xfd, 0xc8, 0xde, 0x6e, 0xe9, 0x04, 0xce, 0x46, 0xc1, 0x79, 0x75,
-	0xd1, 0x2b, 0x30, 0xde, 0x0e, 0x39, 0xe7, 0xc7, 0x98, 0xae, 0x50, 0x44, 0xa7, 0x66, 0x1e, 0x47,
-	0x37, 0x0c, 0x08, 0x4e, 0x60, 0xda, 0x3f, 0xc0, 0x97, 0xa3, 0x0a, 0x50, 0xdd, 0x84, 0x29, 0x4f,
-	0xfb, 0x4f, 0x6f, 0x40, 0xf9, 0xd4, 0x7f, 0xbc, 0xdb, 0xad, 0xcf, 0xae, 0x4b, 0xcd, 0xcb, 0x3d,
-	0x41, 0x06, 0xa7, 0x29, 0xdb, 0xbf, 0x68, 0xc1, 0x09, 0x1d, 0x51, 0x73, 0xa4, 0xeb, 0xa6, 0x05,
-	0x5c, 0x84, 0x21, 0xbf, 0x45, 0x02, 0x27, 0xf2, 0x03, 0x71, 0xcd, 0x9d, 0x97, 0x2b, 0xf4, 0xba,
-	0x28, 0xdf, 0x17, 0x09, 0x73, 0x24, 0x75, 0x59, 0x8e, 0x55, 0x4d, 0x64, 0xc3, 0x00, 0x13, 0x20,
-	0x86, 0xc2, 0x65, 0x92, 0x1d, 0x5a, 0xcc, 0xb2, 0x25, 0xc4, 0x02, 0x62, 0xff, 0xa3, 0xc5, 0xd7,
-	0xa7, 0xde, 0x75, 0xf4, 0x2e, 0x4c, 0x36, 0x9d, 0xa8, 0xb6, 0xb9, 0x74, 0xb7, 0x15, 0x70, 0xe5,
-	0xae, 0x1c, 0xa7, 0x67, 0xba, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x69, 0xf5, 0x4a, 0x82, 0x18, 0x4e,
-	0x91, 0x47, 0xb7, 0x61, 0x84, 0x95, 0x31, 0x6f, 0xe0, 0xb0, 0x13, 0x2f, 0x93, 0xd7, 0x9a, 0x32,
-	0x0e, 0x5a, 0x89, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xd5, 0x22, 0x3f, 0x34, 0xd8, 0xdb, 0xe3, 0x29,
-	0x18, 0x6c, 0xf9, 0xf5, 0x85, 0xf2, 0x22, 0x16, 0xb3, 0xa0, 0xee, 0xbd, 0x0a, 0x2f, 0xc6, 0x12,
-	0x8e, 0xce, 0xc3, 0x90, 0xf8, 0x29, 0x95, 0xf1, 0x6c, 0x8f, 0x08, 0xbc, 0x10, 0x2b, 0x28, 0x7a,
-	0x1e, 0xa0, 0x15, 0xf8, 0xdb, 0x6e, 0x9d, 0x45, 0x7f, 0x2a, 0x9a, 0x76, 0x7d, 0x15, 0x05, 0xc1,
-	0x1a, 0x16, 0x7a, 0x15, 0xc6, 0xda, 0x5e, 0xc8, 0xf9, 0x27, 0x2d, 0xc6, 0xbe, 0xb2, 0x38, 0xbb,
-	0xa1, 0x03, 0xb1, 0x89, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98, 0x9d, 0x5a, 0x7f, 0xbe, 0xf9, 0xfd,
-	0x1a, 0xc5, 0xd0, 0xb3, 0xd9, 0xd1, 0x0a, 0x58, 0x54, 0x44, 0x6f, 0x4b, 0xc7, 0x7c, 0x7e, 0x13,
-	0x09, 0xbf, 0x97, 0xde, 0x6e, 0x2d, 0xcd, 0x2d, 0x5f, 0xf8, 0xd3, 0x18, 0xb4, 0xd0, 0x2b, 0x00,
-	0xe4, 0x6e, 0x44, 0x02, 0xcf, 0x69, 0x28, 0xeb, 0x52, 0xc5, 0xc8, 0x2c, 0xfa, 0xab, 0x7e, 0x74,
-	0x23, 0x24, 0x4b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x8e, 0x00, 0xc4, 0x0f, 0x0d, 0x74, 0x0f,
-	0x86, 0x6a, 0x4e, 0xcb, 0xa9, 0xf1, 0x54, 0xad, 0xc5, 0x3c, 0x7f, 0xe9, 0xb8, 0xc6, 0xec, 0x82,
-	0x40, 0xe7, 0xca, 0x1b, 0x19, 0xa6, 0x7c, 0x48, 0x16, 0x77, 0x55, 0xd8, 0xa8, 0xf6, 0xd0, 0x17,
-	0x2c, 0x18, 0x11, 0xd1, 0x95, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, 0xfb, 0x73, 0x71, 0x0d,
-	0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x5d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x2c, 0xdf, 0xb6,
-	0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x98, 0x5d, 0x35, 0xfa, 0xb3, 0xf6, 0x86, 0xf1, 0xac, 0xed,
-	0xcb, 0xf7, 0x3c, 0x36, 0xf8, 0xed, 0x6e, 0x2f, 0x5a, 0x54, 0xd1, 0xa3, 0x90, 0xf4, 0xe7, 0xbb,
-	0xcb, 0x6a, 0x0f, 0xbb, 0x2e, 0x11, 0x48, 0x3e, 0x0b, 0x13, 0x75, 0x93, 0x6b, 0x11, 0x2b, 0xf1,
-	0xc9, 0x3c, 0xba, 0x09, 0x26, 0x27, 0xe6, 0x53, 0x12, 0x00, 0x9c, 0x24, 0x8c, 0x2a, 0x3c, 0x28,
-	0x4d, 0xd9, 0x5b, 0xf7, 0x85, 0xef, 0x95, 0x9d, 0x3b, 0x97, 0x3b, 0x61, 0x44, 0x9a, 0x14, 0x33,
-	0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0x61, 0x80, 0xf9, 0x4b, 0x86, 0xd3, 0x43,
-	0xf9, 0x6a, 0x0d, 0x33, 0xfa, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, 0x0a, 0xe8, 0x8a, 0xf4,
-	0x46, 0x0e, 0xcb, 0xde, 0x8d, 0x90, 0x30, 0x6f, 0xe4, 0xe1, 0xf9, 0xc7, 0x63, 0x47, 0x63, 0x5e,
-	0x9e, 0x99, 0xf3, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xd2, 0x15, 0xb1, 0xe2, 0x32,
-	0xbb, 0x67, 0xa6, 0xdb, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, 0x52, 0x16, 0x9a, 0xef,
-	0x7a, 0xe1, 0xbd, 0xd5, 0xed, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea, 0x23,
-	0x17, 0x26, 0x02, 0x83, 0xbd, 0x90, 0x21, 0xde, 0xce, 0xf5, 0xc6, 0xc4, 0x68, 0xc9, 0x03, 0x4c,
-	0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x63, 0x9d, 0x5f, 0xfe, 0xdd, 0x58, 0xa3, 0x99,
-	0x2d, 0x18, 0x33, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x32, 0x79, 0xb2, 0x3c, 0x54, 0xcd,
-	0xe3, 0x2b, 0x30, 0xce, 0x36, 0xc2, 0x1d, 0xa7, 0x25, 0x8e, 0xe2, 0xf3, 0xc6, 0x51, 0x6c, 0x9d,
-	0x2f, 0xf2, 0x81, 0x91, 0x43, 0x10, 0x1f, 0x9c, 0xf6, 0xaf, 0xf4, 0x8b, 0xca, 0x6a, 0x17, 0xa1,
-	0x0b, 0x30, 0x2c, 0x3a, 0xa0, 0x32, 0x70, 0xa9, 0x83, 0x61, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12,
-	0xaf, 0xb1, 0xea, 0x9a, 0x87, 0x42, 0x9c, 0x78, 0x4d, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xbf, 0xb7,
-	0x7d, 0x3f, 0x52, 0x77, 0xb0, 0xda, 0x6a, 0xf3, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x7b, 0xb7, 0x48,
-	0xe0, 0x91, 0x86, 0x99, 0x82, 0x42, 0xdd, 0xbd, 0x57, 0x75, 0x20, 0x36, 0x71, 0x29, 0x07, 0xe1,
-	0x87, 0x6c, 0xef, 0x8a, 0x27, 0x76, 0xec, 0xf1, 0x51, 0xe5, 0xb1, 0x2b, 0x24, 0x1c, 0x7d, 0x02,
-	0x4e, 0xa8, 0x70, 0x8f, 0x62, 0x65, 0xca, 0x16, 0x07, 0x0c, 0x89, 0xd8, 0x89, 0x85, 0x6c, 0x34,
-	0x9c, 0x57, 0x1f, 0xbd, 0x0e, 0xe3, 0xe2, 0x19, 0x26, 0x29, 0x0e, 0x9a, 0xe6, 0x8b, 0x57, 0x0d,
-	0x28, 0x4e, 0x60, 0xcb, 0x24, 0x1a, 0xec, 0x7d, 0x22, 0x29, 0x0c, 0xa5, 0x93, 0x68, 0xe8, 0x70,
-	0x9c, 0xaa, 0x81, 0xe6, 0x60, 0x82, 0xb3, 0x9d, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0xf8, 0x93, 0xaa,
-	0x0d, 0x79, 0xdd, 0x04, 0xe3, 0x24, 0x3e, 0xba, 0x04, 0xa3, 0x4e, 0x50, 0xdb, 0x74, 0x23, 0x52,
-	0xa3, 0xbb, 0x8a, 0x59, 0x10, 0x6a, 0xf6, 0x9f, 0x73, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0d, 0xe8,
-	0x0b, 0xef, 0x38, 0x2d, 0x71, 0xfa, 0xe4, 0x1f, 0xe5, 0x6a, 0x05, 0x73, 0xd3, 0x2f, 0xfa, 0x1f,
-	0xb3, 0x9a, 0xf6, 0x3d, 0x38, 0x92, 0x11, 0x16, 0x87, 0x2e, 0x3d, 0xa7, 0xe5, 0xca, 0x51, 0x49,
-	0xb8, 0x69, 0xcc, 0x55, 0xca, 0x72, 0x3c, 0x34, 0x2c, 0xba, 0xbe, 0x59, 0xf8, 0x1c, 0x2d, 0xdd,
-	0xb8, 0x5a, 0xdf, 0xcb, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x5f, 0x0b, 0x30, 0x91, 0xa1, 0x1e, 0x64,
-	0x29, 0xaf, 0x13, 0xef, 0xbc, 0x38, 0xc3, 0xb5, 0x99, 0xd5, 0xa5, 0x70, 0x80, 0xac, 0x2e, 0xc5,
-	0x6e, 0x59, 0x5d, 0xfa, 0xde, 0x4b, 0x56, 0x17, 0x73, 0xc4, 0xfa, 0x7b, 0x1a, 0xb1, 0x8c, 0x4c,
-	0x30, 0x03, 0x07, 0xcc, 0x04, 0x63, 0x0c, 0xfa, 0x60, 0x0f, 0x83, 0xfe, 0xd3, 0x05, 0x98, 0x4c,
-	0x6a, 0x16, 0x0f, 0x41, 0x3a, 0xff, 0xa6, 0x21, 0x9d, 0x3f, 0xdf, 0x4b, 0x04, 0x81, 0x5c, 0x49,
-	0x3d, 0x4e, 0x48, 0xea, 0x9f, 0xee, 0x89, 0x5a, 0x67, 0xa9, 0xfd, 0x2f, 0x15, 0xe0, 0x58, 0xa6,
-	0xc2, 0xf5, 0x10, 0xc6, 0xe6, 0xba, 0x31, 0x36, 0xcf, 0xf5, 0x1c, 0x5d, 0x21, 0x77, 0x80, 0x6e,
-	0x25, 0x06, 0xe8, 0x42, 0xef, 0x24, 0x3b, 0x8f, 0xd2, 0x37, 0x8b, 0x70, 0x26, 0xb3, 0x5e, 0x2c,
-	0xdc, 0x5e, 0x36, 0x84, 0xdb, 0xcf, 0x27, 0x84, 0xdb, 0x76, 0xe7, 0xda, 0x0f, 0x46, 0xda, 0x2d,
-	0xa2, 0x0c, 0xb0, 0x58, 0x29, 0xf7, 0x29, 0xe9, 0x36, 0xa2, 0x0c, 0x28, 0x42, 0xd8, 0xa4, 0xfb,
-	0xbd, 0x24, 0xe1, 0xfe, 0x1f, 0x2d, 0x38, 0x99, 0x39, 0x37, 0x87, 0x20, 0x67, 0x5c, 0x35, 0xe5,
-	0x8c, 0x4f, 0xf5, 0xbc, 0x5a, 0x73, 0x04, 0x8f, 0x5f, 0x1c, 0xc8, 0xf9, 0x16, 0x26, 0xfe, 0xb8,
-	0x0e, 0x23, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xc5, 0xaf, 0xab, 0x04, 0x10, 0xcf, 0xb1, 0xc7, 0x69,
-	0x5c, 0xbc, 0xbf, 0x5b, 0x9a, 0x49, 0x92, 0x88, 0xc1, 0x58, 0xa7, 0x80, 0x3e, 0x05, 0x43, 0xa1,
-	0xcc, 0xdd, 0xd9, 0x77, 0xff, 0xb9, 0x3b, 0x19, 0x27, 0xa9, 0xc4, 0x3b, 0x8a, 0x24, 0xfa, 0x7e,
-	0x3d, 0x6a, 0x55, 0x07, 0xc1, 0x26, 0xef, 0xe4, 0x7d, 0xc4, 0xae, 0x7a, 0x1e, 0x60, 0x5b, 0xbd,
-	0xa3, 0x92, 0xa2, 0x1b, 0xed, 0x85, 0xa5, 0x61, 0xa1, 0x37, 0x60, 0x32, 0xe4, 0x01, 0x5b, 0x63,
-	0x13, 0x19, 0xbe, 0x16, 0x59, 0xcc, 0xbb, 0x6a, 0x02, 0x86, 0x53, 0xd8, 0x68, 0x59, 0xb6, 0xca,
-	0x8c, 0xa1, 0xf8, 0xf2, 0x3c, 0x17, 0xb7, 0x28, 0x0c, 0xa2, 0x8e, 0x26, 0x27, 0x81, 0x0d, 0xbf,
-	0x56, 0x13, 0x7d, 0x0a, 0x80, 0x2e, 0x22, 0x21, 0xc2, 0x19, 0xcc, 0x3f, 0x42, 0xe9, 0xd9, 0x52,
-	0xcf, 0xf4, 0xc0, 0x60, 0xe1, 0x01, 0x16, 0x15, 0x11, 0xac, 0x11, 0x44, 0x0e, 0x8c, 0xc5, 0xff,
-	0xe2, 0xac, 0xf4, 0xe7, 0x73, 0x5b, 0x48, 0x12, 0x67, 0xea, 0x8d, 0x45, 0x9d, 0x04, 0x36, 0x29,
-	0xa2, 0x4f, 0xc2, 0xc9, 0xed, 0x5c, 0xbb, 0x23, 0xce, 0x4b, 0xb2, 0x34, 0xf3, 0xf9, 0xd6, 0x46,
-	0xf9, 0xf5, 0xed, 0xff, 0x09, 0xe0, 0x91, 0x0e, 0x27, 0x3d, 0x9a, 0x33, 0x6d, 0x06, 0x9e, 0x49,
-	0xca, 0x55, 0x66, 0x32, 0x2b, 0x1b, 0x82, 0x96, 0xc4, 0x86, 0x2a, 0xbc, 0xe7, 0x0d, 0xf5, 0x13,
-	0x96, 0xf6, 0xcc, 0xe2, 0x16, 0xe5, 0x1f, 0x3b, 0xe0, 0x0d, 0xf6, 0x00, 0x45, 0x60, 0xeb, 0x19,
-	0x72, 0xa4, 0xe7, 0x7b, 0xee, 0x4e, 0xef, 0x82, 0xa5, 0xdf, 0xc9, 0x0e, 0x71, 0xcf, 0x45, 0x4c,
-	0x97, 0x0f, 0xfa, 0xfd, 0x87, 0x15, 0xee, 0xfe, 0x1b, 0x16, 0x9c, 0x4c, 0x15, 0xf3, 0x3e, 0x90,
-	0x50, 0x44, 0xe9, 0x5b, 0x7d, 0xcf, 0x9d, 0x97, 0x04, 0xf9, 0x37, 0x5c, 0x11, 0xdf, 0x70, 0x32,
-	0x17, 0x2f, 0xd9, 0xf5, 0x2f, 0xfd, 0x7d, 0xe9, 0x08, 0x6b, 0xc0, 0x44, 0xc4, 0xf9, 0x5d, 0x47,
-	0x2d, 0x38, 0x5b, 0x6b, 0x07, 0x41, 0xbc, 0x58, 0x33, 0x36, 0x27, 0x7f, 0x2d, 0x3e, 0xbe, 0xb7,
-	0x5b, 0x3a, 0xbb, 0xd0, 0x05, 0x17, 0x77, 0xa5, 0x86, 0x3c, 0x40, 0xcd, 0x94, 0x75, 0x1f, 0x3b,
-	0x00, 0x72, 0xa4, 0x40, 0x69, 0x5b, 0x40, 0x6e, 0xa7, 0x9b, 0x61, 0x23, 0x98, 0x41, 0xf9, 0x70,
-	0x65, 0x37, 0xdf, 0x99, 0x78, 0xfa, 0x33, 0xd7, 0xe0, 0x4c, 0xe7, 0xc5, 0x74, 0xa0, 0x10, 0x14,
-	0x7f, 0x63, 0xc1, 0xe9, 0x8e, 0x71, 0xce, 0xbe, 0x0b, 0x1f, 0x0b, 0xf6, 0xe7, 0x2d, 0x78, 0x34,
-	0xb3, 0x46, 0xd2, 0x79, 0xb0, 0x46, 0x0b, 0x35, 0x63, 0xd8, 0x38, 0xe2, 0x8f, 0x04, 0xe0, 0x18,
-	0xc7, 0xb0, 0x17, 0x2d, 0x74, 0xb5, 0x17, 0xfd, 0x53, 0x0b, 0x52, 0x57, 0xfd, 0x21, 0x70, 0x9e,
-	0x65, 0x93, 0xf3, 0x7c, 0xbc, 0x97, 0xd1, 0xcc, 0x61, 0x3a, 0xff, 0x79, 0x02, 0x8e, 0xe7, 0x78,
-	0x90, 0x6f, 0xc3, 0xd4, 0x46, 0x8d, 0x98, 0x21, 0x43, 0x3a, 0x85, 0xd2, 0xeb, 0x18, 0x5f, 0x64,
-	0xfe, 0xd8, 0xde, 0x6e, 0x69, 0x2a, 0x85, 0x82, 0xd3, 0x4d, 0xa0, 0xcf, 0x5b, 0x70, 0xd4, 0xb9,
-	0x13, 0x2e, 0xd1, 0x17, 0x84, 0x5b, 0x9b, 0x6f, 0xf8, 0xb5, 0x2d, 0xca, 0x98, 0xc9, 0x6d, 0xf5,
-	0x62, 0xa6, 0x28, 0xfc, 0x56, 0x35, 0x85, 0x6f, 0x34, 0x3f, 0xbd, 0xb7, 0x5b, 0x3a, 0x9a, 0x85,
-	0x85, 0x33, 0xdb, 0x42, 0x58, 0xe4, 0x38, 0x73, 0xa2, 0xcd, 0x4e, 0x41, 0x6d, 0xb2, 0x5c, 0xfd,
-	0x39, 0x4b, 0x2c, 0x21, 0x58, 0xd1, 0x41, 0x9f, 0x81, 0xe1, 0x0d, 0x19, 0xbf, 0x22, 0x83, 0xe5,
-	0x8e, 0x07, 0xb2, 0x73, 0x54, 0x0f, 0x6e, 0x80, 0xa3, 0x90, 0x70, 0x4c, 0x14, 0xbd, 0x0e, 0x45,
-	0x6f, 0x3d, 0x14, 0xa1, 0xf5, 0xb2, 0xed, 0x80, 0x4d, 0x4b, 0x6b, 0x1e, 0x3a, 0x6a, 0x75, 0xb9,
-	0x8a, 0x69, 0x45, 0x74, 0x05, 0x8a, 0xc1, 0xed, 0xba, 0xd0, 0xe3, 0x64, 0x6e, 0x52, 0x3c, 0xbf,
-	0x98, 0xd3, 0x2b, 0x46, 0x09, 0xcf, 0x2f, 0x62, 0x4a, 0x02, 0x55, 0xa0, 0x9f, 0xb9, 0x5d, 0x0b,
-	0xd6, 0x36, 0xf3, 0x29, 0xdf, 0x21, 0x7c, 0x01, 0xf7, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x6b,
-	0x30, 0x50, 0x73, 0xbd, 0x3a, 0x09, 0x04, 0x2f, 0xfb, 0xe1, 0x4c, 0x8d, 0x0d, 0xc3, 0xc8, 0xa1,
-	0xc9, 0x15, 0x18, 0x0c, 0x03, 0x0b, 0x5a, 0x8c, 0x2a, 0x69, 0x6d, 0xae, 0xcb, 0x1b, 0x2b, 0x9b,
-	0x2a, 0x69, 0x6d, 0x2e, 0x57, 0x3b, 0x52, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xbd,
-	0x26, 0x5c, 0xaa, 0x33, 0xc5, 0x9b, 0x66, 0xf4, 0xaf, 0xf9, 0x81, 0xbd, 0xdd, 0x52, 0x61, 0x79,
-	0x01, 0x17, 0xd6, 0x6b, 0x68, 0x15, 0x06, 0xd7, 0x79, 0xbc, 0x20, 0x21, 0x1f, 0x7d, 0x32, 0x3b,
-	0x94, 0x51, 0x2a, 0xa4, 0x10, 0xf7, 0x6d, 0x15, 0x00, 0x2c, 0x89, 0xb0, 0x94, 0x5b, 0x2a, 0xee,
-	0x91, 0x08, 0xbb, 0x3a, 0x7b, 0xb0, 0x58, 0x55, 0xfc, 0xa9, 0x11, 0x47, 0x4f, 0xc2, 0x1a, 0x45,
-	0xba, 0xaa, 0x9d, 0x7b, 0xed, 0x80, 0xe5, 0xe4, 0x10, 0x8a, 0x99, 0xcc, 0x55, 0x3d, 0x27, 0x91,
-	0x3a, 0xad, 0x6a, 0x85, 0x84, 0x63, 0xa2, 0x68, 0x0b, 0xc6, 0xb6, 0xc3, 0xd6, 0x26, 0x91, 0x5b,
-	0x9a, 0x85, 0xeb, 0xcb, 0xe1, 0x66, 0x6f, 0x0a, 0x44, 0x37, 0x88, 0xda, 0x4e, 0x23, 0x75, 0x0a,
-	0xb1, 0x67, 0xcd, 0x4d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xb7, 0xed, 0xdf, 0xde, 0x89,
-	0x88, 0x88, 0x96, 0x9a, 0x39, 0xfc, 0x6f, 0x71, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0,
-	0x4d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xcc, 0x0f, 0xc5, 0x3e, 0x27, 0x91, 0x72, 0x06, 0x85, 0x9d,
-	0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x36, 0xfd, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x54, 0xfe, 0x29,
-	0x59, 0xc9, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x54, 0x87, 0xf1, 0x96, 0x1f,
-	0x44, 0x77, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x41, 0x50, 0x6a, 0x60, 0x8a, 0x16, 0x99, 0x59, 0x90,
-	0x09, 0xc1, 0x09, 0x9a, 0xe8, 0xe3, 0x30, 0x18, 0xd6, 0x9c, 0x06, 0x29, 0x5f, 0x9f, 0x3e, 0x92,
-	0x7f, 0xfd, 0x54, 0x39, 0x4a, 0xce, 0xea, 0xe2, 0xe1, 0x9e, 0x38, 0x0a, 0x96, 0xe4, 0xd0, 0x32,
-	0xf4, 0xb3, 0x54, 0xd6, 0x2c, 0xb4, 0x6f, 0x4e, 0x44, 0xf9, 0x94, 0x53, 0x0f, 0x3f, 0x9b, 0x58,
-	0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x92, 0x02, 0x3f, 0x9c, 0x3e, 0x96, 0xbf, 0x07, 0x84, 0x80,
-	0xe1, 0x7a, 0xb5, 0xd3, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x1e, 0xef,
-	0x60, 0xb0, 0x99, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xc7, 0x43, 0x69,
-	0x9e, 0x85, 0x49, 0x98, 0xfe, 0x63, 0x2b, 0x65, 0xb1, 0xf1, 0x91, 0x5e, 0x05, 0xde, 0x0f, 0xf0,
-	0xe1, 0xfa, 0x79, 0x0b, 0x8e, 0xb7, 0x32, 0x3f, 0x44, 0x30, 0x00, 0xbd, 0xc9, 0xcd, 0xf9, 0xa7,
-	0xab, 0x30, 0xd0, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0x52, 0x38, 0x50, 0x7c, 0xcf, 0xc2, 0x81, 0x15,
-	0x18, 0xaa, 0xf1, 0x97, 0x9c, 0x4c, 0x5f, 0xd0, 0x53, 0x10, 0x53, 0xae, 0xa7, 0x15, 0x15, 0xb1,
-	0x22, 0x81, 0x7e, 0xd2, 0x82, 0xd3, 0xc9, 0xae, 0x63, 0xc2, 0xc0, 0xc2, 0x5c, 0x93, 0x8b, 0xb5,
-	0x96, 0xc5, 0xf7, 0xa7, 0xf8, 0x7f, 0x03, 0x79, 0xbf, 0x1b, 0x02, 0xee, 0xdc, 0x18, 0x5a, 0xcc,
-	0x90, 0xab, 0x0d, 0x98, 0x3a, 0xc9, 0x1e, 0x64, 0x6b, 0x2f, 0xc2, 0x68, 0xd3, 0x6f, 0x7b, 0x91,
-	0xb0, 0xba, 0x14, 0xa6, 0x5b, 0xcc, 0x64, 0x69, 0x45, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x22, 0x37,
-	0x74, 0xdf, 0x12, 0xb9, 0x77, 0x60, 0xd4, 0xd3, 0x1c, 0x12, 0x3a, 0xbd, 0x60, 0x85, 0x74, 0x51,
-	0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0xb3, 0xb4, 0x0c, 0xde, 0x9b, 0xb4, 0xec, 0x50,
-	0x9f, 0xc4, 0xf6, 0x6f, 0x16, 0x32, 0x5e, 0x0c, 0x5c, 0x2a, 0xf7, 0x9a, 0x29, 0x95, 0x3b, 0x97,
-	0x94, 0xca, 0xa5, 0x54, 0x55, 0x86, 0x40, 0xae, 0xf7, 0x1c, 0x9a, 0x3d, 0x07, 0xa6, 0xfe, 0x61,
-	0x0b, 0x4e, 0x30, 0xdd, 0x07, 0x6d, 0xe0, 0x3d, 0xeb, 0x3b, 0x98, 0x41, 0xec, 0xb5, 0x6c, 0x72,
-	0x38, 0xaf, 0x1d, 0xbb, 0x01, 0x67, 0xbb, 0xdd, 0xbb, 0xcc, 0xbe, 0xb8, 0xae, 0xcc, 0x2b, 0x62,
-	0xfb, 0xe2, 0x7a, 0x79, 0x11, 0x33, 0x48, 0xaf, 0x61, 0x17, 0xed, 0xff, 0xdb, 0x82, 0x62, 0xc5,
-	0xaf, 0x1f, 0xc2, 0x8b, 0xfe, 0x63, 0xc6, 0x8b, 0xfe, 0x91, 0xec, 0x1b, 0xbf, 0x9e, 0xab, 0xec,
-	0x5b, 0x4a, 0x28, 0xfb, 0x4e, 0xe7, 0x11, 0xe8, 0xac, 0xda, 0xfb, 0xe5, 0x22, 0x8c, 0x54, 0xfc,
-	0xba, 0xda, 0x67, 0xff, 0xfd, 0xfd, 0xb8, 0x11, 0xe5, 0x66, 0xcd, 0xd2, 0x28, 0x33, 0x7b, 0x62,
-	0x19, 0xf5, 0xe2, 0xbb, 0xcc, 0x9b, 0xe8, 0x16, 0x71, 0x37, 0x36, 0x23, 0x52, 0x4f, 0x7e, 0xce,
-	0xe1, 0x79, 0x13, 0x7d, 0xab, 0x08, 0x13, 0x89, 0xd6, 0x51, 0x03, 0xc6, 0x1a, 0xba, 0x2a, 0x49,
-	0xac, 0xd3, 0xfb, 0xd2, 0x42, 0x09, 0x6f, 0x0c, 0xad, 0x08, 0x9b, 0xc4, 0xd1, 0x2c, 0x80, 0xa7,
-	0xdb, 0xa4, 0xab, 0x00, 0xcb, 0x9a, 0x3d, 0xba, 0x86, 0x81, 0x5e, 0x82, 0x91, 0xc8, 0x6f, 0xf9,
-	0x0d, 0x7f, 0x63, 0xe7, 0x2a, 0x91, 0x11, 0x39, 0x95, 0xc9, 0xf2, 0x5a, 0x0c, 0xc2, 0x3a, 0x1e,
-	0xba, 0x0b, 0x53, 0x8a, 0x48, 0xf5, 0x01, 0xa8, 0xd7, 0x98, 0xd8, 0x64, 0x35, 0x49, 0x11, 0xa7,
-	0x1b, 0x41, 0xaf, 0xc0, 0x38, 0xb3, 0x9d, 0x66, 0xf5, 0xaf, 0x92, 0x1d, 0x19, 0xa9, 0x99, 0x71,
-	0xd8, 0x2b, 0x06, 0x04, 0x27, 0x30, 0xd1, 0x02, 0x4c, 0x35, 0xdd, 0x30, 0x51, 0x7d, 0x80, 0x55,
-	0x67, 0x1d, 0x58, 0x49, 0x02, 0x71, 0x1a, 0xdf, 0xfe, 0x75, 0x31, 0xc7, 0x5e, 0xe4, 0x7e, 0xb0,
-	0x1d, 0xdf, 0xdf, 0xdb, 0xf1, 0x9b, 0x16, 0x4c, 0xd2, 0xd6, 0x99, 0x41, 0xa8, 0x64, 0xa4, 0x54,
-	0x2e, 0x0f, 0xab, 0x43, 0x2e, 0x8f, 0x73, 0xf4, 0xd8, 0xae, 0xfb, 0xed, 0x48, 0x48, 0x47, 0xb5,
-	0x73, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0x84, 0xd7, 0xbd, 0x8e, 0x47, 0x82, 0x00,
-	0x0b, 0xa8, 0x4c, 0xf5, 0xd1, 0x97, 0x9d, 0xea, 0x83, 0x47, 0x6c, 0x17, 0x76, 0x74, 0x82, 0xa5,
-	0xd5, 0x22, 0xb6, 0x4b, 0x03, 0xbb, 0x18, 0xc7, 0xfe, 0x76, 0x11, 0x46, 0x2b, 0x7e, 0x3d, 0x36,
-	0xec, 0x78, 0xd1, 0x30, 0xec, 0x38, 0x9b, 0x30, 0xec, 0x98, 0xd4, 0x71, 0x35, 0x33, 0x8e, 0x37,
-	0x01, 0xf9, 0x22, 0x90, 0xfc, 0x65, 0xe2, 0x31, 0xbb, 0x37, 0x61, 0xa8, 0x57, 0x8c, 0xcd, 0x1e,
-	0xae, 0xa7, 0x30, 0x70, 0x46, 0xad, 0x0f, 0x4c, 0x42, 0x0e, 0xd7, 0x24, 0xe4, 0x4f, 0x2c, 0xb6,
-	0x02, 0x16, 0x57, 0xab, 0xdc, 0x56, 0x19, 0x5d, 0x84, 0x11, 0x76, 0x5a, 0xb2, 0x90, 0x11, 0xd2,
-	0x72, 0x82, 0xa5, 0xf1, 0x5c, 0x8d, 0x8b, 0xb1, 0x8e, 0x83, 0xce, 0xc3, 0x50, 0x48, 0x9c, 0xa0,
-	0xb6, 0xa9, 0xae, 0x0a, 0x61, 0xe6, 0xc0, 0xcb, 0xb0, 0x82, 0xa2, 0xb7, 0xe2, 0xc0, 0xe3, 0xc5,
-	0x7c, 0xc3, 0x67, 0xbd, 0x3f, 0x7c, 0xbb, 0xe5, 0x47, 0x1b, 0xb7, 0x6f, 0x01, 0x4a, 0xe3, 0xf7,
-	0xe0, 0x49, 0x56, 0x32, 0x43, 0xe3, 0x0e, 0xa7, 0xc2, 0xe2, 0xfe, 0x9b, 0x05, 0xe3, 0x15, 0xbf,
-	0x4e, 0x8f, 0x81, 0xef, 0xa5, 0x3d, 0xaf, 0x67, 0x5d, 0x18, 0xe8, 0x90, 0x75, 0xe1, 0x31, 0xe8,
-	0xaf, 0xf8, 0xf5, 0x2e, 0xe1, 0x7b, 0x7f, 0xc5, 0x82, 0xc1, 0x8a, 0x5f, 0x3f, 0x04, 0x25, 0xce,
-	0x6b, 0xa6, 0x12, 0xe7, 0x44, 0xce, 0xba, 0xc9, 0xd1, 0xdb, 0xfc, 0x79, 0x1f, 0x8c, 0xd1, 0x7e,
-	0xfa, 0x1b, 0x72, 0x2a, 0x8d, 0x61, 0xb3, 0x7a, 0x18, 0x36, 0xfa, 0xa4, 0xf0, 0x1b, 0x0d, 0xff,
-	0x4e, 0x72, 0x5a, 0x97, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x2c, 0x0c, 0xb5, 0x02, 0xb2, 0xed, 0xfa,
-	0x82, 0x57, 0xd7, 0x54, 0x62, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x7d, 0xc4, 0x87, 0xae, 0x47, 0xf9,
-	0x92, 0x9a, 0xef, 0xd5, 0xb9, 0x9e, 0xa3, 0x28, 0x52, 0x83, 0x69, 0xe5, 0xd8, 0xc0, 0x42, 0xb7,
-	0x60, 0x98, 0xfd, 0x67, 0xc7, 0x4e, 0xff, 0x81, 0x8f, 0x1d, 0x91, 0x2c, 0x59, 0x10, 0xc0, 0x31,
-	0x2d, 0xf4, 0x3c, 0x40, 0x24, 0xd3, 0xeb, 0x84, 0x22, 0x8c, 0xab, 0x7a, 0xd7, 0xa8, 0xc4, 0x3b,
-	0x21, 0xd6, 0xb0, 0xd0, 0x33, 0x30, 0x1c, 0x39, 0x6e, 0xe3, 0x9a, 0xeb, 0x31, 0x5b, 0x00, 0xda,
-	0x7f, 0x91, 0xb3, 0x58, 0x14, 0xe2, 0x18, 0x4e, 0xf9, 0x4a, 0x16, 0xdd, 0x6a, 0x7e, 0x27, 0x12,
-	0xe9, 0xf9, 0x8a, 0x9c, 0xaf, 0xbc, 0xa6, 0x4a, 0xb1, 0x86, 0x81, 0x36, 0xe1, 0x94, 0xeb, 0xb1,
-	0x34, 0x5a, 0xa4, 0xba, 0xe5, 0xb6, 0xd6, 0xae, 0x55, 0x6f, 0x92, 0xc0, 0x5d, 0xdf, 0x99, 0x77,
-	0x6a, 0x5b, 0xc4, 0xab, 0x33, 0xb1, 0xc3, 0xd0, 0xfc, 0xe3, 0xa2, 0x8b, 0xa7, 0xca, 0x1d, 0x70,
-	0x71, 0x47, 0x4a, 0xc8, 0xa6, 0xdb, 0x31, 0x20, 0x4e, 0x53, 0xc8, 0x17, 0x78, 0x0a, 0x1e, 0x56,
-	0x82, 0x05, 0xc4, 0x7e, 0x81, 0xed, 0x89, 0xeb, 0x55, 0xf4, 0xb4, 0x71, 0xbc, 0x1c, 0xd7, 0x8f,
-	0x97, 0xfd, 0xdd, 0xd2, 0xc0, 0xf5, 0xaa, 0x16, 0xe9, 0xe8, 0x12, 0x1c, 0xab, 0xf8, 0xf5, 0x8a,
-	0x1f, 0x44, 0xcb, 0x7e, 0x70, 0xc7, 0x09, 0xea, 0x72, 0x09, 0x96, 0x64, 0xac, 0x27, 0x7a, 0xc6,
-	0xf6, 0xf3, 0x13, 0xc8, 0x88, 0xe3, 0xf4, 0x02, 0xe3, 0x10, 0x0f, 0xe8, 0x5a, 0x5b, 0x63, 0xbc,
-	0x8a, 0x4a, 0x56, 0x77, 0xd9, 0x89, 0x08, 0xba, 0x0e, 0x63, 0x35, 0xfd, 0xda, 0x16, 0xd5, 0x9f,
-	0x92, 0x97, 0x9d, 0x71, 0xa7, 0x67, 0xde, 0xf3, 0x66, 0x7d, 0xfb, 0x1b, 0x96, 0x68, 0x85, 0x4b,
-	0x3e, 0xb8, 0x0d, 0x6d, 0xf7, 0x33, 0x77, 0x01, 0xa6, 0x02, 0xbd, 0x8a, 0x66, 0x8b, 0x76, 0x8c,
-	0x67, 0xff, 0x49, 0x00, 0x71, 0x1a, 0x1f, 0x7d, 0x12, 0x4e, 0x1a, 0x85, 0x52, 0x2d, 0xaf, 0xe5,
-	0xe0, 0x66, 0xb2, 0x21, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x08, 0xc7, 0x93, 0xdf, 0x25,
-	0xa4, 0x35, 0xf7, 0xf9, 0x75, 0x85, 0x83, 0x7d, 0x9d, 0xfd, 0x12, 0x4c, 0xd1, 0x67, 0xbc, 0x62,
-	0x49, 0xd9, 0xfc, 0x75, 0x0f, 0xa7, 0xf5, 0xdb, 0x43, 0xec, 0x1a, 0x4c, 0x64, 0xa0, 0x43, 0x9f,
-	0x86, 0xf1, 0x90, 0xb0, 0x18, 0x72, 0x52, 0x4a, 0xd8, 0xc1, 0x2f, 0xbe, 0xba, 0xa4, 0x63, 0xf2,
-	0x97, 0x90, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x13, 0xc6, 0xef, 0xb8, 0x5e, 0xdd, 0xbf, 0x13, 0x4a,
-	0xfa, 0x43, 0xf9, 0x2a, 0x87, 0x5b, 0x1c, 0x33, 0xd1, 0x47, 0xa3, 0xb9, 0x5b, 0x06, 0x31, 0x9c,
-	0x20, 0x4e, 0x8f, 0x9a, 0xa0, 0xed, 0xcd, 0x85, 0x37, 0x42, 0x12, 0x88, 0x08, 0x77, 0xec, 0xa8,
-	0xc1, 0xb2, 0x10, 0xc7, 0x70, 0x7a, 0xd4, 0xb0, 0x3f, 0xcc, 0xb1, 0x9e, 0x9d, 0x65, 0xe2, 0xa8,
-	0xc1, 0xaa, 0x14, 0x6b, 0x18, 0xf4, 0x28, 0x66, 0xff, 0x56, 0x7d, 0x0f, 0xfb, 0x7e, 0x24, 0x0f,
-	0x6f, 0x96, 0xae, 0x53, 0x2b, 0xc7, 0x06, 0x56, 0x4e, 0x3c, 0xbd, 0xbe, 0x83, 0xc6, 0xd3, 0x43,
-	0x51, 0x87, 0x58, 0x02, 0x3c, 0x22, 0xf4, 0xa5, 0x4e, 0xb1, 0x04, 0xf6, 0xef, 0x2b, 0xce, 0x00,
-	0xe5, 0x05, 0xd6, 0xc5, 0x00, 0xf5, 0xf3, 0x80, 0x81, 0x4c, 0x29, 0x5a, 0xe5, 0xa3, 0x23, 0x61,
-	0x68, 0x09, 0x06, 0xc3, 0x9d, 0xb0, 0x16, 0x35, 0xc2, 0x4e, 0x29, 0x59, 0xab, 0x0c, 0x45, 0xcb,
-	0x08, 0xce, 0xab, 0x60, 0x59, 0x17, 0xd5, 0xe0, 0x88, 0xa0, 0xb8, 0xb0, 0xe9, 0x78, 0x2a, 0x51,
-	0x24, 0xb7, 0x7e, 0xbc, 0xb8, 0xb7, 0x5b, 0x3a, 0x22, 0x5a, 0xd6, 0xc1, 0xfb, 0xbb, 0x25, 0xba,
-	0x25, 0x33, 0x20, 0x38, 0x8b, 0x1a, 0x5f, 0xf2, 0xb5, 0x9a, 0xdf, 0x6c, 0x55, 0x02, 0x7f, 0xdd,
-	0x6d, 0x90, 0x4e, 0x8a, 0xe5, 0xaa, 0x81, 0x29, 0x96, 0xbc, 0x51, 0x86, 0x13, 0xd4, 0xd0, 0x6d,
-	0x98, 0x70, 0x5a, 0xad, 0xb9, 0xa0, 0xe9, 0x07, 0xb2, 0x81, 0x91, 0x7c, 0x0d, 0xc5, 0x9c, 0x89,
-	0xca, 0xf3, 0x44, 0x26, 0x0a, 0x71, 0x92, 0x20, 0x1d, 0x28, 0xb1, 0xd1, 0x8c, 0x81, 0x1a, 0x8b,
-	0x07, 0x4a, 0xec, 0xcb, 0x8c, 0x81, 0xca, 0x80, 0xe0, 0x2c, 0x6a, 0xf6, 0x0f, 0x30, 0xc6, 0x9f,
-	0xc5, 0x9b, 0x66, 0x6e, 0x46, 0x4d, 0x18, 0x6b, 0xb1, 0x63, 0x5f, 0xe4, 0x70, 0x13, 0x47, 0xc5,
-	0x8b, 0x3d, 0x0a, 0x42, 0xef, 0xb0, 0x2c, 0xb4, 0x86, 0x41, 0x6c, 0x45, 0x27, 0x87, 0x4d, 0xea,
-	0xf6, 0x2f, 0xcd, 0x30, 0xd6, 0xb1, 0xca, 0xa5, 0x9b, 0x83, 0xc2, 0xe9, 0x52, 0xc8, 0x33, 0x66,
-	0xf2, 0xf5, 0x08, 0xf1, 0xfa, 0x12, 0x8e, 0x9b, 0x58, 0xd6, 0x45, 0x9f, 0x82, 0x71, 0xd7, 0x73,
-	0xe3, 0xec, 0xcd, 0xe1, 0xf4, 0xd1, 0xfc, 0x68, 0x5e, 0x0a, 0x4b, 0xcf, 0xef, 0xa8, 0x57, 0xc6,
-	0x09, 0x62, 0xe8, 0x2d, 0x66, 0x23, 0x2a, 0x49, 0x17, 0x7a, 0x21, 0xad, 0x9b, 0x83, 0x4a, 0xb2,
-	0x1a, 0x11, 0xd4, 0x86, 0x23, 0xe9, 0x2c, 0xd6, 0xe1, 0xb4, 0x9d, 0xff, 0x36, 0x4a, 0x27, 0xa2,
-	0x8e, 0x13, 0xf1, 0xa5, 0x61, 0x21, 0xce, 0xa2, 0x8f, 0xae, 0x25, 0x73, 0x0c, 0x17, 0x0d, 0x0d,
-	0x44, 0x2a, 0xcf, 0xf0, 0x58, 0xc7, 0xf4, 0xc2, 0x1b, 0x70, 0x5a, 0x4b, 0xd3, 0x7a, 0x39, 0x70,
-	0x98, 0x8d, 0x92, 0xcb, 0x6e, 0x23, 0x8d, 0xa9, 0x7d, 0x74, 0x6f, 0xb7, 0x74, 0x7a, 0xad, 0x13,
-	0x22, 0xee, 0x4c, 0x07, 0x5d, 0x87, 0x63, 0x3c, 0x16, 0xcd, 0x22, 0x71, 0xea, 0x0d, 0xd7, 0x53,
-	0x5c, 0x33, 0x3f, 0xbb, 0x4e, 0xee, 0xed, 0x96, 0x8e, 0xcd, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0xe8,
-	0x35, 0x18, 0xae, 0x7b, 0xf2, 0x94, 0x1d, 0x30, 0x32, 0xe1, 0x0e, 0x2f, 0xae, 0x56, 0xd5, 0xf7,
-	0xc7, 0x7f, 0x70, 0x5c, 0x01, 0x6d, 0x70, 0x15, 0x98, 0x92, 0x5b, 0x0e, 0xa6, 0x42, 0x94, 0x26,
-	0x45, 0xfb, 0x46, 0x70, 0x07, 0xae, 0xfb, 0x55, 0x0e, 0x80, 0x46, 0xdc, 0x07, 0x83, 0x30, 0x7a,
-	0x13, 0x90, 0xc8, 0xb8, 0x34, 0x57, 0x63, 0x09, 0x02, 0x35, 0xbb, 0x54, 0x25, 0x42, 0xa8, 0xa6,
-	0x30, 0x70, 0x46, 0x2d, 0x74, 0x85, 0x1e, 0x8f, 0x7a, 0xa9, 0x38, 0x7e, 0x55, 0xbe, 0xf5, 0x45,
-	0xd2, 0x0a, 0x08, 0x33, 0xa5, 0x34, 0x29, 0xe2, 0x44, 0x3d, 0x54, 0x87, 0x53, 0x4e, 0x3b, 0xf2,
-	0x99, 0x76, 0xd1, 0x44, 0x5d, 0xf3, 0xb7, 0x88, 0xc7, 0x14, 0xfb, 0x43, 0x2c, 0xf4, 0xe9, 0xa9,
-	0xb9, 0x0e, 0x78, 0xb8, 0x23, 0x15, 0xfa, 0x9c, 0xa2, 0x63, 0xa1, 0x29, 0xfe, 0x0c, 0x3f, 0x75,
-	0xae, 0x0d, 0x97, 0x18, 0xe8, 0x25, 0x18, 0xd9, 0xf4, 0xc3, 0x68, 0x95, 0x44, 0x77, 0xfc, 0x60,
-	0x4b, 0xa4, 0x78, 0x88, 0xd3, 0xea, 0xc4, 0x20, 0xac, 0xe3, 0xa1, 0xa7, 0x60, 0x90, 0x99, 0x9d,
-	0x95, 0x17, 0xd9, 0x5d, 0x3b, 0x14, 0x9f, 0x31, 0x57, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xae,
-	0x2c, 0xb0, 0xe3, 0x38, 0x81, 0x5a, 0xae, 0x2c, 0x60, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x74, 0x02,
-	0x52, 0x09, 0xfc, 0x1a, 0x09, 0xb5, 0x64, 0x4e, 0x8f, 0xf0, 0x04, 0x16, 0x74, 0xb9, 0x56, 0xb3,
-	0x10, 0x70, 0x76, 0x3d, 0x44, 0xd2, 0x29, 0x8a, 0xc7, 0xf3, 0xd5, 0xae, 0x69, 0x76, 0xb0, 0xc7,
-	0x2c, 0xc5, 0x1e, 0x4c, 0xaa, 0xe4, 0xc8, 0x3c, 0x65, 0x45, 0x38, 0x3d, 0xc1, 0xd6, 0x76, 0xef,
-	0xf9, 0x2e, 0x94, 0x22, 0xbb, 0x9c, 0xa0, 0x84, 0x53, 0xb4, 0x8d, 0xd8, 0xba, 0x93, 0x5d, 0x63,
-	0xeb, 0x5e, 0x80, 0xe1, 0xb0, 0x7d, 0xbb, 0xee, 0x37, 0x1d, 0xd7, 0x63, 0xd6, 0x3b, 0xda, 0xc3,
-	0xbd, 0x2a, 0x01, 0x38, 0xc6, 0x41, 0xcb, 0x30, 0xe4, 0x48, 0x2d, 0x35, 0xca, 0x0f, 0x1b, 0xa8,
-	0x74, 0xd3, 0x3c, 0x92, 0x96, 0xd4, 0x4b, 0xab, 0xba, 0xe8, 0x55, 0x18, 0x13, 0xa1, 0x49, 0x78,
-	0x14, 0x1e, 0x66, 0x5d, 0xa3, 0x39, 0x53, 0x57, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x0d, 0x18, 0x89,
-	0xfc, 0x86, 0x90, 0x71, 0x86, 0xd3, 0xc7, 0xf3, 0xa3, 0xfb, 0xae, 0x29, 0x34, 0x5d, 0x7f, 0xa2,
-	0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe3, 0xeb, 0x9d, 0xa5, 0x6e, 0x22, 0xa1, 0x48, 0x48, 0x7f, 0x3a,
-	0xcf, 0xf4, 0x92, 0xa1, 0x99, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x19, 0xa6, 0x5a, 0x81,
-	0xeb, 0xb3, 0x35, 0xa1, 0xb4, 0xee, 0xd3, 0x66, 0xa2, 0xd6, 0x4a, 0x12, 0x01, 0xa7, 0xeb, 0xb0,
-	0xc8, 0x32, 0xa2, 0x70, 0xfa, 0x24, 0x4f, 0x36, 0xc7, 0xe5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a,
-	0x61, 0x27, 0x31, 0x17, 0xe1, 0x4d, 0xcf, 0xe4, 0xc7, 0x2b, 0xd0, 0x45, 0x7d, 0x9c, 0xf7, 0x57,
-	0x7f, 0x71, 0x4c, 0x01, 0xd5, 0xb5, 0x1c, 0xef, 0xf4, 0x05, 0x15, 0x4e, 0x9f, 0xea, 0x60, 0xfb,
-	0x9b, 0x78, 0x2e, 0xc7, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x06, 0x4c, 0x8a, 0xb0,
-	0x0b, 0xf1, 0x30, 0x9d, 0x8e, 0xfd, 0xa3, 0x70, 0x02, 0x86, 0x53, 0xd8, 0x3c, 0xd9, 0x9b, 0x73,
-	0xbb, 0x41, 0xc4, 0xd1, 0x77, 0xcd, 0xf5, 0xb6, 0xc2, 0xe9, 0x33, 0xec, 0x7c, 0x10, 0xc9, 0xde,
-	0x92, 0x50, 0x9c, 0x51, 0x03, 0xad, 0xc1, 0x64, 0x2b, 0x20, 0xa4, 0xc9, 0xde, 0x49, 0xe2, 0x3e,
-	0x2b, 0xf1, 0xc0, 0x4a, 0xb4, 0x27, 0x95, 0x04, 0x6c, 0x3f, 0xa3, 0x0c, 0xa7, 0x28, 0xa0, 0x3b,
-	0x30, 0xe4, 0x6f, 0x93, 0x60, 0x93, 0x38, 0xf5, 0xe9, 0xb3, 0x1d, 0xbc, 0xf6, 0xc4, 0xe5, 0x76,
-	0x5d, 0xe0, 0x26, 0x8c, 0x9a, 0x64, 0x71, 0x77, 0xa3, 0x26, 0xd9, 0x18, 0xfa, 0x4f, 0x2c, 0x38,
-	0x29, 0xd5, 0x84, 0xd5, 0x16, 0x1d, 0xf5, 0x05, 0xdf, 0x0b, 0xa3, 0x80, 0x87, 0x02, 0x7a, 0x34,
-	0x3f, 0x3c, 0xce, 0x5a, 0x4e, 0x25, 0xa5, 0x45, 0x38, 0x99, 0x87, 0x11, 0xe2, 0xfc, 0x16, 0xe9,
-	0xcb, 0x3e, 0x24, 0x91, 0x3c, 0x8c, 0xe6, 0xc2, 0xe5, 0xb7, 0x16, 0x57, 0xa7, 0x1f, 0xe3, 0x71,
-	0x8c, 0xe8, 0x66, 0xa8, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x2e, 0x42, 0xc1, 0x0f, 0xa7, 0x1f, 0x67,
-	0x6b, 0xfb, 0x64, 0xce, 0x38, 0x5e, 0xaf, 0x72, 0xe3, 0xd6, 0xeb, 0x55, 0x5c, 0xf0, 0x43, 0x99,
-	0x70, 0x8d, 0x3e, 0x67, 0xc3, 0xe9, 0x27, 0xb8, 0xcc, 0x59, 0x26, 0x5c, 0x63, 0x85, 0x38, 0x86,
-	0xa3, 0x4d, 0x98, 0x08, 0x0d, 0xb1, 0x41, 0x38, 0x7d, 0x8e, 0x8d, 0xd4, 0x13, 0x79, 0x93, 0x66,
-	0x60, 0x6b, 0x99, 0x90, 0x4c, 0x2a, 0x38, 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x70, 0x11, 0x4e, 0x3f,
-	0xd9, 0x65, 0x77, 0x69, 0xc8, 0xfa, 0xee, 0xd2, 0x69, 0xe0, 0x04, 0x4d, 0x74, 0x43, 0x77, 0x89,
-	0x3c, 0x9f, 0x6f, 0x28, 0x99, 0xe9, 0x0c, 0x39, 0x96, 0xe7, 0x08, 0x39, 0xf3, 0x7d, 0x30, 0x95,
-	0xe2, 0xc2, 0x0e, 0xe2, 0x1f, 0x32, 0xb3, 0x05, 0x63, 0xc6, 0x4a, 0x7f, 0xa8, 0xe6, 0x43, 0x3f,
-	0x03, 0x30, 0xac, 0xcc, 0x3a, 0x72, 0xf4, 0x6c, 0x53, 0xf7, 0xa5, 0x67, 0xbb, 0x60, 0x5a, 0x1f,
-	0x9d, 0x4c, 0x5a, 0x1f, 0x0d, 0x55, 0xfc, 0xba, 0x61, 0x70, 0xb4, 0x96, 0x11, 0x41, 0x38, 0xef,
-	0x8c, 0xee, 0xdd, 0x21, 0x4e, 0x53, 0x55, 0x15, 0x7b, 0x36, 0x63, 0xea, 0xeb, 0xa8, 0xfd, 0xba,
-	0x0c, 0x53, 0x9e, 0xcf, 0x9e, 0x11, 0xa4, 0x2e, 0x79, 0x44, 0xc6, 0x0a, 0x0e, 0xeb, 0x11, 0xee,
-	0x12, 0x08, 0x38, 0x5d, 0x87, 0x36, 0xc8, 0x79, 0xb9, 0xa4, 0xba, 0x8d, 0xb3, 0x7a, 0x58, 0x40,
-	0xe9, 0xf3, 0x95, 0xff, 0x0a, 0xa7, 0x27, 0xf3, 0x9f, 0xaf, 0xbc, 0x52, 0x92, 0x5f, 0x0c, 0x25,
-	0xbf, 0xc8, 0xb4, 0x4b, 0x2d, 0xbf, 0x5e, 0xae, 0x88, 0x97, 0x88, 0x16, 0xdb, 0xbf, 0x5e, 0xae,
-	0x60, 0x0e, 0x43, 0x73, 0x30, 0xc0, 0x7e, 0xc8, 0xc8, 0x41, 0x79, 0x27, 0x49, 0xb9, 0xa2, 0xe5,
-	0xa4, 0x65, 0x15, 0xb0, 0xa8, 0xc8, 0xb4, 0x07, 0xf4, 0xf9, 0xc6, 0xb4, 0x07, 0x83, 0xf7, 0xa9,
-	0x3d, 0x90, 0x04, 0x70, 0x4c, 0x0b, 0xdd, 0x85, 0x63, 0xc6, 0x93, 0x59, 0x79, 0x08, 0x42, 0xbe,
-	0x91, 0x42, 0x02, 0x79, 0xfe, 0xb4, 0xe8, 0xf4, 0xb1, 0x72, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a,
-	0xc0, 0x54, 0x2d, 0xd5, 0xea, 0x50, 0xef, 0xad, 0xaa, 0x75, 0x91, 0x6e, 0x31, 0x4d, 0x18, 0xbd,
-	0x0a, 0x43, 0xef, 0xfa, 0xdc, 0xa0, 0x50, 0xbc, 0x9e, 0x64, 0x7c, 0x9b, 0xa1, 0xb7, 0xae, 0x57,
-	0x59, 0xf9, 0xfe, 0x6e, 0x69, 0xa4, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x2a, 0xa0, 0x1f, 0xb3, 0x60,
-	0x26, 0xfd, 0x26, 0x57, 0x9d, 0x1e, 0xeb, 0xbd, 0xd3, 0xb6, 0x68, 0x74, 0x66, 0x29, 0x97, 0x1c,
-	0xee, 0xd0, 0x14, 0xfa, 0x28, 0xdd, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x84, 0xfe, 0x8f, 0xc6, 0xfb,
-	0x89, 0x96, 0xee, 0xef, 0x96, 0x26, 0xf8, 0xe1, 0xed, 0xde, 0x53, 0x59, 0x08, 0x78, 0x05, 0xf4,
-	0x83, 0x70, 0x2c, 0x48, 0xcb, 0xc8, 0x89, 0x7c, 0x27, 0x3c, 0xdd, 0xcb, 0x45, 0x90, 0x9c, 0x70,
-	0x9c, 0x45, 0x10, 0x67, 0xb7, 0x63, 0xff, 0xa1, 0xc5, 0x74, 0x23, 0xa2, 0x5b, 0x24, 0x6c, 0x37,
-	0xa2, 0x43, 0x30, 0xe2, 0x5b, 0x32, 0x6c, 0x13, 0xee, 0xdb, 0x0a, 0xef, 0xbf, 0xb3, 0x98, 0x15,
-	0xde, 0x21, 0xfa, 0x13, 0xbe, 0x05, 0x43, 0x91, 0x68, 0x4d, 0x74, 0x3d, 0xcf, 0x62, 0x48, 0x76,
-	0x8a, 0x59, 0x22, 0xaa, 0x77, 0x98, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0x5f, 0xf3, 0x19, 0x90, 0x90,
-	0x43, 0x50, 0x01, 0x2f, 0x9a, 0x2a, 0xe0, 0x52, 0x97, 0x2f, 0xc8, 0x51, 0x05, 0xff, 0x57, 0x66,
-	0xbf, 0x99, 0xfc, 0xf1, 0xfd, 0x6e, 0xfe, 0x69, 0x7f, 0xd1, 0x02, 0x88, 0xd3, 0xbe, 0xf4, 0x90,
-	0xc0, 0xfb, 0x12, 0x7d, 0x79, 0xf9, 0x91, 0x5f, 0xf3, 0x1b, 0x42, 0x05, 0x75, 0x2a, 0xd6, 0x42,
-	0xf3, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0xe3, 0x30, 0x17, 0x63, 0xbb, 0x08, 0x23,
-	0x06, 0xf3, 0x57, 0x2c, 0x38, 0x9a, 0xe5, 0x9c, 0x42, 0xdf, 0xf1, 0x5c, 0x12, 0xab, 0x4c, 0x73,
-	0xd5, 0x6c, 0xde, 0x14, 0xe5, 0x58, 0x61, 0xf4, 0x9c, 0x19, 0xfd, 0x60, 0x29, 0x49, 0xae, 0xc3,
-	0x58, 0x25, 0x20, 0x1a, 0x7f, 0xf1, 0x7a, 0x9c, 0x2d, 0x69, 0x78, 0xfe, 0xd9, 0x03, 0x47, 0x7c,
-	0xb2, 0xbf, 0x5a, 0x80, 0xa3, 0xdc, 0xc0, 0x6c, 0x6e, 0xdb, 0x77, 0xeb, 0x15, 0xbf, 0x2e, 0x5c,
-	0x8a, 0xdf, 0x86, 0xd1, 0x96, 0x26, 0x3e, 0xef, 0x14, 0x5e, 0x5f, 0x17, 0xb3, 0xc7, 0x02, 0x3f,
-	0xbd, 0x14, 0x1b, 0xb4, 0x50, 0x1d, 0x46, 0xc9, 0xb6, 0x5b, 0x53, 0x96, 0x45, 0x85, 0x03, 0x5f,
-	0xd2, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0xf6, 0x6c, 0x16, 0xae, 0xb1, 0x68, 0x7d, 0x5d,
-	0xac, 0x89, 0x7e, 0xce, 0x82, 0x13, 0x39, 0xc1, 0xf8, 0x69, 0x73, 0x77, 0x98, 0x29, 0x9f, 0x58,
-	0xb6, 0xaa, 0x39, 0x6e, 0xe0, 0x87, 0x05, 0x14, 0x7d, 0x1c, 0xa0, 0x15, 0xa7, 0x30, 0xed, 0x12,
-	0xb5, 0xdc, 0x88, 0x5f, 0xac, 0x85, 0xa2, 0x55, 0x99, 0x4e, 0x35, 0x5a, 0xf6, 0x57, 0xfa, 0xa0,
-	0x9f, 0x19, 0x71, 0xa1, 0x0a, 0x0c, 0x6e, 0xf2, 0x48, 0x89, 0x1d, 0xe7, 0x8d, 0xe2, 0xca, 0xd0,
-	0x8b, 0xf1, 0xbc, 0x69, 0xa5, 0x58, 0x92, 0x41, 0x2b, 0x70, 0x84, 0xa7, 0x67, 0x6d, 0x2c, 0x92,
-	0x86, 0xb3, 0x23, 0x25, 0xd3, 0x05, 0xf6, 0xa9, 0x4a, 0x42, 0x5f, 0x4e, 0xa3, 0xe0, 0xac, 0x7a,
-	0xe8, 0x75, 0x18, 0x8f, 0xdc, 0x26, 0xf1, 0xdb, 0x91, 0xa4, 0xc4, 0xf3, 0xa1, 0xaa, 0xc7, 0xd3,
-	0x9a, 0x01, 0xc5, 0x09, 0x6c, 0xf4, 0x2a, 0x8c, 0xb5, 0x52, 0x32, 0xf8, 0xfe, 0x58, 0x58, 0x65,
-	0xca, 0xdd, 0x4d, 0x5c, 0xe6, 0x9f, 0xd2, 0x66, 0xde, 0x38, 0x6b, 0x9b, 0x01, 0x09, 0x37, 0xfd,
-	0x46, 0x9d, 0x71, 0xc0, 0xfd, 0x9a, 0x7f, 0x4a, 0x02, 0x8e, 0x53, 0x35, 0x28, 0x95, 0x75, 0xc7,
-	0x6d, 0xb4, 0x03, 0x12, 0x53, 0x19, 0x30, 0xa9, 0x2c, 0x27, 0xe0, 0x38, 0x55, 0xa3, 0xbb, 0x72,
-	0x61, 0xf0, 0xc1, 0x28, 0x17, 0xec, 0x5f, 0x2d, 0x80, 0x31, 0xb5, 0xdf, 0xc3, 0xd9, 0x56, 0x5f,
-	0x83, 0xbe, 0x8d, 0xa0, 0x55, 0x13, 0x06, 0x8b, 0x99, 0x5f, 0x76, 0x19, 0x57, 0x16, 0xf4, 0x2f,
-	0xa3, 0xff, 0x31, 0xab, 0x45, 0xf7, 0xf8, 0xb1, 0x4a, 0xe0, 0xd3, 0x4b, 0x4e, 0x06, 0x53, 0x55,
-	0x6e, 0x60, 0x83, 0xf2, 0xbd, 0xde, 0x21, 0xec, 0xb8, 0xf0, 0x65, 0xe1, 0x14, 0x0c, 0xdb, 0xbe,
-	0xaa, 0x78, 0xad, 0x4b, 0x2a, 0xe8, 0x22, 0x8c, 0x88, 0x04, 0x98, 0xcc, 0x5b, 0x89, 0x6f, 0x26,
-	0x66, 0x8b, 0xb8, 0x18, 0x17, 0x63, 0x1d, 0xc7, 0xfe, 0xf1, 0x02, 0x1c, 0xc9, 0x70, 0x37, 0xe5,
-	0xd7, 0xc8, 0x86, 0x1b, 0x46, 0xc1, 0x4e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, 0x0c, 0x7a, 0x56,
-	0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0x77, 0x2e, 0x01, 0x3d, 0xd8, 0xe5, 0x44, 0xaf, 0xed, 0x76,
-	0x48, 0x64, 0x86, 0x03, 0x75, 0x6d, 0x33, 0xc3, 0x05, 0x06, 0xa1, 0x4f, 0xc0, 0x0d, 0xa5, 0x8d,
-	0xd7, 0x9e, 0x80, 0x5c, 0x1f, 0xcf, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, 0x12, 0x0f, 0xc5,
-	0x38, 0xf2, 0x35, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0x27, 0x73, 0x1d, 0xd0, 0x69, 0xd7,
-	0x9b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xf2, 0xe4, 0xd1, 0xae, 0x49, 0x6b, 0x73, 0x45, 0x94, 0x63,
-	0x85, 0x81, 0xce, 0x41, 0x3f, 0x93, 0xdb, 0x27, 0x93, 0xdf, 0xe1, 0xf9, 0x45, 0x1e, 0x0b, 0x94,
-	0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf1, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x23, 0x79, 0xa1, 0xd0,
-	0xee, 0xfa, 0x7e, 0x03, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0xaa, 0x11, 0x3b, 0x75, 0x3f,
-	0xd4, 0x06, 0xed, 0x29, 0x18, 0xdc, 0x22, 0x3b, 0x81, 0xeb, 0x6d, 0x24, 0xad, 0x5d, 0xaf, 0xf2,
-	0x62, 0x2c, 0xe1, 0x66, 0x96, 0xf8, 0xc1, 0x07, 0x91, 0x25, 0x5e, 0x5f, 0x01, 0x43, 0x5d, 0xd9,
-	0x93, 0x9f, 0x28, 0xc2, 0x04, 0x9e, 0x5f, 0xfc, 0x60, 0x22, 0x6e, 0xa4, 0x27, 0xe2, 0x41, 0x24,
-	0x53, 0x3f, 0xd8, 0x6c, 0xfc, 0x9e, 0x05, 0x13, 0x2c, 0x0d, 0xa7, 0x88, 0x1e, 0xe3, 0xfa, 0xde,
-	0x21, 0x3c, 0x05, 0x1e, 0x83, 0xfe, 0x80, 0x36, 0x2a, 0x66, 0x50, 0xed, 0x71, 0xd6, 0x13, 0xcc,
-	0x61, 0xe8, 0x14, 0xf4, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe5, 0x47, 0xf0, 0xa2, 0x13, 0x39, 0x98,
-	0x95, 0xb2, 0x38, 0x96, 0x98, 0xb4, 0x1a, 0x2e, 0xef, 0x74, 0x6c, 0x55, 0xf1, 0xfe, 0x08, 0x4d,
-	0x93, 0xd9, 0xb5, 0xf7, 0x16, 0xc7, 0x32, 0x9b, 0x64, 0xe7, 0x67, 0xf6, 0x3f, 0x15, 0xe0, 0x4c,
-	0x66, 0xbd, 0x9e, 0xe3, 0x58, 0x76, 0xae, 0xfd, 0x30, 0x93, 0xf6, 0x15, 0x0f, 0xd1, 0x97, 0xa0,
-	0xaf, 0x57, 0xee, 0xbf, 0xbf, 0x87, 0xf0, 0x92, 0x99, 0x43, 0xf6, 0x3e, 0x09, 0x2f, 0x99, 0xd9,
-	0xb7, 0x1c, 0x31, 0xc1, 0xb7, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0xe7, 0xe9, 0x39, 0xc3, 0x80,
-	0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0x73, 0x30, 0xd1, 0x74, 0x3d, 0x7a,
-	0xf8, 0xec, 0x98, 0xac, 0xb8, 0x52, 0xb7, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x7a,
-	0x92, 0x7f, 0xdd, 0xab, 0x07, 0xda, 0x75, 0xb3, 0xa6, 0xc5, 0x89, 0x1a, 0xc5, 0x8c, 0x30, 0x94,
-	0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbb, 0x9c, 0x68, 0x34, 0x5b, 0x46, 0x34, 0xf3, 0x2a, 0x8c, 0xdd,
-	0xb7, 0x9e, 0xc5, 0xfe, 0x66, 0x11, 0x1e, 0xe9, 0xb0, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76,
-	0xd6, 0xa7, 0xe6, 0xa1, 0x02, 0x47, 0xd7, 0xdb, 0x8d, 0xc6, 0x0e, 0x73, 0xc0, 0x23, 0x75, 0x89,
-	0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xba, 0x9c, 0x81, 0x83, 0x33, 0x6b, 0xd2, 0x27, 0x16, 0xbd,
-	0x49, 0x76, 0x14, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xe5, 0x6c,
-	0x3b, 0x2e, 0x4f, 0x7a, 0x22, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x25, 0x11, 0x70, 0xba,
-	0x4e, 0x8e, 0x4a, 0xa8, 0x78, 0x5f, 0x2a, 0x21, 0x33, 0x08, 0xe2, 0x40, 0x7e, 0x10, 0xc4, 0xce,
-	0xe7, 0x62, 0xd7, 0x7c, 0x91, 0xef, 0xc0, 0xd8, 0x41, 0x2d, 0xc7, 0x9f, 0x82, 0xc1, 0x40, 0x64,
-	0xe2, 0x4f, 0x78, 0xbb, 0xcb, 0x3c, 0xe5, 0x12, 0x6e, 0xff, 0x6f, 0x16, 0x28, 0x59, 0xb2, 0x19,
-	0xef, 0xfc, 0x55, 0x66, 0x06, 0xcf, 0xa5, 0xe0, 0x5a, 0x88, 0xb3, 0x63, 0x9a, 0x19, 0x7c, 0x0c,
-	0xc4, 0x26, 0x2e, 0x5f, 0x6e, 0x61, 0x1c, 0x59, 0xc3, 0x78, 0x40, 0x08, 0x0d, 0xa4, 0xc2, 0x40,
-	0x9f, 0x80, 0xc1, 0xba, 0xbb, 0xed, 0x86, 0x42, 0x8e, 0x76, 0x60, 0x1d, 0x60, 0xfc, 0x7d, 0x8b,
-	0x9c, 0x0c, 0x96, 0xf4, 0xec, 0x9f, 0xb2, 0x40, 0xa9, 0x4e, 0xaf, 0x10, 0xa7, 0x11, 0x6d, 0xa2,
-	0x37, 0x00, 0x24, 0x05, 0x25, 0x7b, 0x93, 0x06, 0x5d, 0x80, 0x15, 0x64, 0xdf, 0xf8, 0x87, 0xb5,
-	0x3a, 0xe8, 0x75, 0x18, 0xd8, 0x64, 0xb4, 0xc4, 0xb7, 0x9d, 0x53, 0xaa, 0x2e, 0x56, 0xba, 0xbf,
-	0x5b, 0x3a, 0x6a, 0xb6, 0x29, 0x6f, 0x31, 0x5e, 0xcb, 0xfe, 0x89, 0x42, 0x3c, 0xa7, 0x6f, 0xb5,
-	0xfd, 0xc8, 0x39, 0x04, 0x4e, 0xe4, 0xb2, 0xc1, 0x89, 0x3c, 0xd1, 0x49, 0x37, 0xcc, 0xba, 0x94,
-	0xcb, 0x81, 0x5c, 0x4f, 0x70, 0x20, 0x4f, 0x76, 0x27, 0xd5, 0x99, 0xf3, 0xf8, 0x6f, 0x2c, 0x98,
-	0x32, 0xf0, 0x0f, 0xe1, 0x02, 0x5c, 0x36, 0x2f, 0xc0, 0x47, 0xbb, 0x7e, 0x43, 0xce, 0xc5, 0xf7,
-	0xa3, 0xc5, 0x44, 0xdf, 0xd9, 0x85, 0xf7, 0x2e, 0xf4, 0x6d, 0x3a, 0x41, 0x5d, 0xbc, 0xeb, 0x2f,
-	0xf4, 0x34, 0xd6, 0xb3, 0x57, 0x9c, 0x40, 0x18, 0x83, 0x3c, 0x2b, 0x47, 0x9d, 0x16, 0x75, 0x35,
-	0x04, 0x61, 0x4d, 0xa1, 0x4b, 0x30, 0x10, 0xd6, 0xfc, 0x96, 0xf2, 0x29, 0x64, 0x49, 0xd4, 0xab,
-	0xac, 0x64, 0x7f, 0xb7, 0x84, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8, 0xe8, 0x6d, 0x18, 0x63, 0xbf,
-	0x94, 0x65, 0x66, 0x31, 0x5f, 0x02, 0x53, 0xd5, 0x11, 0xb9, 0xd9, 0xb2, 0x51, 0x84, 0x4d, 0x52,
-	0x33, 0x1b, 0x30, 0xac, 0x3e, 0xeb, 0xa1, 0x6a, 0xfe, 0xff, 0xba, 0x08, 0x47, 0x32, 0xd6, 0x1c,
-	0x0a, 0x8d, 0x99, 0xb8, 0xd8, 0xe3, 0x52, 0x7d, 0x8f, 0x73, 0x11, 0xb2, 0x07, 0x60, 0x5d, 0xac,
-	0xad, 0x9e, 0x1b, 0xbd, 0x11, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7b, 0xa3, 0xb4, 0xb1, 0x43, 0x1b,
-	0x6a, 0xda, 0x90, 0xea, 0xe9, 0x43, 0x9d, 0xd3, 0x3f, 0xe9, 0x83, 0xa3, 0x59, 0xe6, 0x2a, 0xe8,
-	0x73, 0x30, 0xc0, 0x9c, 0xde, 0xa4, 0xe0, 0xec, 0xc5, 0x5e, 0x0d, 0x5d, 0x66, 0x99, 0xdf, 0x9c,
-	0x08, 0x99, 0x3b, 0x2b, 0x8f, 0x23, 0x5e, 0xd8, 0x75, 0x98, 0x45, 0x9b, 0x2c, 0x94, 0x95, 0xb8,
-	0x3d, 0xe5, 0xf1, 0xf1, 0x91, 0x9e, 0x3b, 0x20, 0xee, 0xdf, 0x30, 0x61, 0xf5, 0x25, 0x8b, 0xbb,
-	0x5b, 0x7d, 0xc9, 0x96, 0x51, 0x19, 0x06, 0x6a, 0xdc, 0x9c, 0xa8, 0xd8, 0xfd, 0x08, 0xe3, 0xb6,
-	0x44, 0xea, 0x00, 0x16, 0x36, 0x44, 0x82, 0xc0, 0x8c, 0x0b, 0x23, 0xda, 0xc0, 0x3c, 0xd4, 0xc5,
-	0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xea, 0x02, 0xfa, 0x59, 0xed, 0xee, 0x17, 0xe7, 0xc1,
-	0x87, 0x0d, 0xde, 0xe9, 0x54, 0xc2, 0x15, 0x31, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x35, 0x63, 0xcd,
-	0xe7, 0x26, 0xcc, 0x32, 0x2f, 0xfc, 0xce, 0xf1, 0xe5, 0xed, 0x9f, 0xb3, 0x20, 0xe1, 0x2c, 0xa6,
-	0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x2c, 0xf4, 0x05, 0x7e, 0x43, 0xf2, 0x53, 0x0a, 0x03, 0xfb,
-	0x0d, 0x82, 0x19, 0x84, 0x62, 0x44, 0xb1, 0x10, 0x6b, 0x54, 0x7f, 0xa0, 0x8b, 0xa7, 0xf7, 0x63,
-	0xd0, 0xdf, 0x20, 0xdb, 0xa4, 0x91, 0xcc, 0x1b, 0x7b, 0x8d, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0xaf,
-	0x0f, 0x4e, 0x77, 0x8c, 0x78, 0x47, 0x19, 0xcc, 0x0d, 0x27, 0x22, 0x77, 0x9c, 0x9d, 0x64, 0xbe,
-	0xc4, 0xcb, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0xdc, 0xe6, 0x39, 0x80, 0x12, 0xc2, 0x61, 0x91, 0xfa,
-	0x47, 0x40, 0x4d, 0x61, 0x63, 0xf1, 0x41, 0x08, 0x1b, 0x9f, 0x07, 0x08, 0xc3, 0x06, 0xb7, 0x09,
-	0xad, 0x0b, 0x8f, 0xf0, 0x38, 0x57, 0x54, 0xf5, 0x9a, 0x80, 0x60, 0x0d, 0x0b, 0x2d, 0xc2, 0x64,
-	0x2b, 0xf0, 0x23, 0x2e, 0x6b, 0x5f, 0xe4, 0x66, 0xd3, 0xfd, 0x66, 0xb0, 0xb1, 0x4a, 0x02, 0x8e,
-	0x53, 0x35, 0xd0, 0x4b, 0x30, 0x22, 0x02, 0x90, 0x55, 0x7c, 0xbf, 0x21, 0xc4, 0x7b, 0xca, 0x92,
-	0xb8, 0x1a, 0x83, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x02, 0xfc, 0xc1, 0xcc, 0x6a, 0x5c, 0x88, 0xaf,
-	0xe1, 0x25, 0x92, 0x15, 0x0c, 0xf5, 0x94, 0xac, 0x20, 0x16, 0x78, 0x0e, 0xf7, 0xac, 0x4f, 0x86,
-	0xae, 0x22, 0xc2, 0xaf, 0xf5, 0xc1, 0x11, 0xb1, 0x70, 0x1e, 0xf6, 0x72, 0xb9, 0x91, 0x5e, 0x2e,
-	0x0f, 0x42, 0x24, 0xfa, 0xc1, 0x9a, 0x39, 0xec, 0x35, 0xf3, 0x93, 0x16, 0x98, 0x3c, 0x24, 0xfa,
-	0x8f, 0x72, 0x13, 0xce, 0xbe, 0x94, 0xcb, 0x93, 0xc6, 0x91, 0xcc, 0xdf, 0x5b, 0xea, 0x59, 0xfb,
-	0x7f, 0xb1, 0xe0, 0xd1, 0xae, 0x14, 0xd1, 0x12, 0x0c, 0x33, 0x46, 0x57, 0x7b, 0x17, 0x3f, 0xa9,
-	0xdc, 0x2a, 0x24, 0x20, 0x87, 0xef, 0x8e, 0x6b, 0xa2, 0xa5, 0x54, 0x66, 0xdf, 0xa7, 0x32, 0x32,
-	0xfb, 0x1e, 0x33, 0x86, 0xe7, 0x3e, 0x53, 0xfb, 0x7e, 0x89, 0xde, 0x38, 0xa6, 0x6f, 0xe6, 0x47,
-	0x0c, 0x71, 0xae, 0x9d, 0x10, 0xe7, 0x22, 0x13, 0x5b, 0xbb, 0x43, 0xde, 0x80, 0x49, 0x16, 0x99,
-	0x94, 0x39, 0xf9, 0x08, 0xa7, 0xce, 0x42, 0x6c, 0xc8, 0x7f, 0x2d, 0x01, 0xc3, 0x29, 0x6c, 0xfb,
-	0x1f, 0x8a, 0x30, 0xc0, 0xb7, 0xdf, 0x21, 0x3c, 0x7c, 0x9f, 0x81, 0x61, 0xb7, 0xd9, 0x6c, 0xf3,
-	0x64, 0xad, 0xfd, 0xb1, 0x59, 0x78, 0x59, 0x16, 0xe2, 0x18, 0x8e, 0x96, 0x85, 0x26, 0xa1, 0x43,
-	0xf0, 0x73, 0xde, 0xf1, 0xd9, 0x45, 0x27, 0x72, 0x38, 0x17, 0xa7, 0xee, 0xd9, 0x58, 0xe7, 0x80,
-	0x3e, 0x0d, 0x10, 0x46, 0x81, 0xeb, 0x6d, 0xd0, 0x32, 0x91, 0x21, 0xe3, 0xe9, 0x0e, 0xd4, 0xaa,
-	0x0a, 0x99, 0xd3, 0x8c, 0xcf, 0x1c, 0x05, 0xc0, 0x1a, 0x45, 0x34, 0x6b, 0xdc, 0xf4, 0x33, 0x89,
-	0xb9, 0x03, 0x4e, 0x35, 0x9e, 0xb3, 0x99, 0x97, 0x61, 0x58, 0x11, 0xef, 0x26, 0x57, 0x1c, 0xd5,
-	0x19, 0xb6, 0x8f, 0xc1, 0x44, 0xa2, 0x6f, 0x07, 0x12, 0x4b, 0xfe, 0xbe, 0x05, 0x13, 0xbc, 0x33,
-	0x4b, 0xde, 0xb6, 0xb8, 0x0d, 0xee, 0xc1, 0xd1, 0x46, 0xc6, 0xa9, 0x2c, 0xa6, 0xbf, 0xf7, 0x53,
-	0x5c, 0x89, 0x21, 0xb3, 0xa0, 0x38, 0xb3, 0x0d, 0x74, 0x9e, 0xee, 0x38, 0x7a, 0xea, 0x3a, 0x0d,
-	0x11, 0x99, 0x64, 0x94, 0xef, 0x36, 0x5e, 0x86, 0x15, 0xd4, 0xfe, 0x5b, 0x0b, 0xa6, 0x78, 0xcf,
-	0xaf, 0x92, 0x1d, 0x75, 0x36, 0x7d, 0x27, 0xfb, 0x2e, 0xd2, 0x84, 0x17, 0x72, 0xd2, 0x84, 0xeb,
-	0x9f, 0x56, 0xec, 0xf8, 0x69, 0x5f, 0xb5, 0x40, 0xac, 0x90, 0x43, 0x90, 0xb4, 0x7c, 0x9f, 0x29,
-	0x69, 0x99, 0xc9, 0xdf, 0x04, 0x39, 0x22, 0x96, 0x7f, 0xb3, 0x60, 0x92, 0x23, 0xc4, 0x56, 0x10,
-	0xdf, 0xd1, 0x79, 0x98, 0x37, 0xbf, 0x28, 0xd3, 0xac, 0xf5, 0x2a, 0xd9, 0x59, 0xf3, 0x2b, 0x4e,
-	0xb4, 0x99, 0xfd, 0x51, 0xc6, 0x64, 0xf5, 0x75, 0x9c, 0xac, 0xba, 0xdc, 0x40, 0x46, 0x42, 0xc8,
-	0x2e, 0x02, 0xe0, 0x83, 0x26, 0x84, 0xb4, 0xff, 0xd1, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca,
-	0x0e, 0xb1, 0x52, 0xed, 0xa2, 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0x03, 0x19, 0x9e, 0x84,
-	0x29, 0x4b, 0xb1, 0xbb, 0x29, 0xcb, 0x01, 0x46, 0xf4, 0xab, 0x83, 0x90, 0x74, 0xeb, 0x44, 0x37,
-	0x61, 0xb4, 0xe6, 0xb4, 0x9c, 0xdb, 0x6e, 0xc3, 0x8d, 0x5c, 0x12, 0x76, 0xb2, 0x73, 0x5b, 0xd0,
-	0xf0, 0x84, 0xf1, 0x81, 0x56, 0x82, 0x0d, 0x3a, 0x68, 0x16, 0xa0, 0x15, 0xb8, 0xdb, 0x6e, 0x83,
-	0x6c, 0x30, 0x81, 0x10, 0x8b, 0x85, 0xc4, 0x8d, 0xee, 0x64, 0x29, 0xd6, 0x30, 0x32, 0x42, 0x90,
-	0x14, 0x1f, 0x72, 0x08, 0x12, 0x38, 0xb4, 0x10, 0x24, 0x7d, 0x07, 0x0a, 0x41, 0x32, 0x74, 0xe0,
-	0x10, 0x24, 0xfd, 0x3d, 0x85, 0x20, 0xc1, 0x70, 0x5c, 0xf2, 0x9e, 0xf4, 0xff, 0xb2, 0xdb, 0x20,
-	0xe2, 0xc1, 0xc1, 0x03, 0x38, 0xcd, 0xec, 0xed, 0x96, 0x8e, 0xe3, 0x4c, 0x0c, 0x9c, 0x53, 0x13,
-	0x7d, 0x1c, 0xa6, 0x9d, 0x46, 0xc3, 0xbf, 0xa3, 0x26, 0x75, 0x29, 0xac, 0x39, 0x8d, 0x38, 0xae,
-	0xdf, 0xd0, 0xfc, 0xa9, 0xbd, 0xdd, 0xd2, 0xf4, 0x5c, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x83,
-	0xe1, 0x56, 0xe0, 0xd7, 0x56, 0x34, 0xdf, 0xf3, 0x33, 0x74, 0x00, 0x2b, 0xb2, 0x70, 0x7f, 0xb7,
-	0x34, 0xa6, 0xfe, 0xb0, 0x0b, 0x3f, 0xae, 0x90, 0x11, 0xdd, 0x63, 0xe4, 0x61, 0x47, 0xf7, 0x18,
-	0x7d, 0xc0, 0xd1, 0x3d, 0xec, 0x2d, 0x38, 0x52, 0x25, 0x81, 0xeb, 0x34, 0xdc, 0x7b, 0x94, 0x27,
-	0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x07, 0x89, 0x53, 0xbf, 0xa7, 0xa0, 0xe7, 0x9a, 0x5c, 0x46, 0x9e,
-	0xf2, 0x31, 0x21, 0xfb, 0xff, 0xb7, 0x60, 0x50, 0xb8, 0x8a, 0x1e, 0x02, 0x67, 0x3a, 0x67, 0xa8,
-	0x64, 0x4a, 0xd9, 0x93, 0xc2, 0x3a, 0x93, 0xab, 0x8c, 0x29, 0x27, 0x94, 0x31, 0x8f, 0x76, 0x22,
-	0xd2, 0x59, 0x0d, 0xf3, 0x9f, 0x15, 0xe9, 0x0b, 0xc1, 0x08, 0x5a, 0xf0, 0xf0, 0x87, 0x60, 0x15,
-	0x06, 0x43, 0xe1, 0x34, 0x5f, 0xc8, 0xf7, 0xe5, 0x49, 0x4e, 0x62, 0x6c, 0x03, 0x29, 0xdc, 0xe4,
-	0x25, 0x91, 0x4c, 0x6f, 0xfc, 0xe2, 0x43, 0xf4, 0xc6, 0xef, 0x16, 0xd6, 0xa1, 0xef, 0x41, 0x84,
-	0x75, 0xb0, 0xbf, 0xce, 0x6e, 0x67, 0xbd, 0xfc, 0x10, 0x18, 0xb7, 0xcb, 0xe6, 0x3d, 0x6e, 0x77,
-	0x58, 0x59, 0xa2, 0x53, 0x39, 0x0c, 0xdc, 0xef, 0x5a, 0x70, 0x3a, 0xe3, 0xab, 0x34, 0x6e, 0xee,
-	0x59, 0x18, 0x72, 0xda, 0x75, 0x57, 0xed, 0x65, 0x4d, 0x5b, 0x3c, 0x27, 0xca, 0xb1, 0xc2, 0x40,
-	0x0b, 0x30, 0x45, 0xee, 0xb6, 0x5c, 0xae, 0x86, 0xd7, 0x4d, 0xc7, 0x8b, 0xdc, 0xbf, 0x78, 0x29,
-	0x09, 0xc4, 0x69, 0x7c, 0x15, 0x1a, 0xae, 0x98, 0x1b, 0x1a, 0xee, 0x37, 0x2d, 0x18, 0x51, 0x6e,
-	0xe3, 0x0f, 0x7d, 0xb4, 0xdf, 0x30, 0x47, 0xfb, 0x91, 0x0e, 0xa3, 0x9d, 0x33, 0xcc, 0x7f, 0x53,
-	0x50, 0xfd, 0xad, 0xf8, 0x41, 0xd4, 0x03, 0x97, 0x78, 0xff, 0x6e, 0x2f, 0x17, 0x61, 0xc4, 0x69,
-	0xb5, 0x24, 0x40, 0xda, 0x2f, 0xb2, 0x14, 0x16, 0x71, 0x31, 0xd6, 0x71, 0x94, 0x17, 0x4e, 0x31,
-	0xd7, 0x0b, 0xa7, 0x0e, 0x10, 0x39, 0xc1, 0x06, 0x89, 0x68, 0x99, 0x30, 0xb7, 0xce, 0x3f, 0x6f,
-	0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0xb2, 0x17, 0x5d, 0x0f, 0xf8, 0x33,
-	0x55, 0x0b, 0xc0, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x0c, 0x91, 0xc2, 0xda, 0xe8, 0x37, 0x0d, 0x61,
-	0x56, 0x45, 0x39, 0x56, 0x18, 0xf6, 0xcb, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0xc0, 0x82, 0xff,
-	0x34, 0xaa, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf5, 0xf0, 0x85, 0x9d, 0x0f, 0x7b, 0xda, 0xb0, 0xee,
-	0xcf, 0x1a, 0xc7, 0x38, 0x44, 0x9f, 0x4c, 0x19, 0x37, 0x3d, 0xd7, 0xe5, 0xd6, 0x38, 0x80, 0x39,
-	0x13, 0xcb, 0x67, 0xc7, 0xb2, 0x7d, 0x95, 0x2b, 0x62, 0x5f, 0x68, 0xf9, 0xec, 0x04, 0x00, 0xc7,
-	0x38, 0x94, 0x61, 0x53, 0x7f, 0xc2, 0x69, 0x14, 0x87, 0x3d, 0x57, 0xd8, 0x21, 0xd6, 0x30, 0xd0,
-	0x05, 0x21, 0xb4, 0xe0, 0xba, 0x87, 0x47, 0x12, 0x42, 0x0b, 0x39, 0x5c, 0x9a, 0xa4, 0xe9, 0x22,
-	0x8c, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0x41, 0x5b, 0xe8, 0x8f, 0xa3, 0xeb, 0x2e, 0xc5, 0xc5,
-	0x58, 0xc7, 0x41, 0x6b, 0x30, 0x11, 0x72, 0x59, 0x9e, 0x4a, 0xb6, 0xc1, 0x65, 0xa2, 0x4f, 0x2b,
-	0x87, 0x7d, 0x13, 0xbc, 0xcf, 0x8a, 0xf8, 0xe9, 0x24, 0xc3, 0x98, 0x24, 0x49, 0xa0, 0xd7, 0x61,
-	0xbc, 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0x36, 0x3e, 0x43, 0x46, 0x2c, 0xcb, 0xf1,
-	0x6b, 0x06, 0x14, 0x27, 0xb0, 0x29, 0x83, 0xa8, 0x97, 0x88, 0x04, 0x31, 0x8e, 0xb7, 0x41, 0xc2,
-	0xe9, 0x61, 0xf6, 0x55, 0x8c, 0x41, 0xbc, 0x96, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x4b, 0x30, 0x2a,
-	0x3f, 0x5f, 0x8b, 0xfa, 0x13, 0x3b, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x8e, 0xc9, 0xff,
-	0x6b, 0x81, 0xb3, 0xbe, 0xee, 0xd6, 0x44, 0x28, 0x0c, 0xee, 0xfc, 0xfd, 0x31, 0xe9, 0x69, 0xba,
-	0x94, 0x85, 0xb4, 0xbf, 0x5b, 0x3a, 0x25, 0x46, 0x2d, 0x13, 0x8e, 0xb3, 0x69, 0xa3, 0x15, 0x38,
-	0xc2, 0x6d, 0x60, 0x16, 0x36, 0x49, 0x6d, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x57,
-	0xd2, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x07, 0xa6, 0x5b, 0xed, 0xdb, 0x0d, 0x37, 0xdc, 0x5c, 0xf5,
-	0x23, 0x66, 0x42, 0x36, 0x57, 0xaf, 0x07, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x91, 0x9a,
-	0x2a, 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x07, 0xc7, 0x12, 0x0b, 0x41, 0x84, 0x5c, 0x19, 0xcf,
-	0x4f, 0xb5, 0x55, 0xcd, 0xaa, 0x20, 0xa2, 0x17, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x15, 0x00,
-	0xb7, 0xb5, 0xec, 0x34, 0xdd, 0x06, 0x7d, 0x8e, 0x1e, 0x61, 0x6b, 0x84, 0x3e, 0x4d, 0xa0, 0x5c,
-	0x91, 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc1, 0xb8, 0xf8, 0xb7, 0x23,
-	0xa6, 0x74, 0x4a, 0x65, 0x65, 0x1d, 0x97, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda,
-	0x80, 0xd3, 0x32, 0x25, 0xac, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xbf, 0xd5, 0x10, 0xf7, 0x29,
-	0x9a, 0xeb, 0x84, 0x88, 0x3b, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xb1, 0x38,
-	0x22, 0xe8, 0xb5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0xc7, 0x5c, 0x2f, 0x6b, 0x55, 0x1f, 0x67,
-	0x84, 0x3e, 0xca, 0x9d, 0xe5, 0x3b, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0xca, 0x70, 0x24,
-	0xe2, 0x05, 0x8b, 0x6e, 0xc8, 0xd3, 0xe7, 0xd0, 0x67, 0xdf, 0x09, 0xd6, 0xdc, 0x09, 0xba, 0x9a,
-	0xd7, 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0xbd, 0x19, 0x80, 0x7e, 0xc3, 0xa2, 0xb5, 0x35, 0x46, 0x1f,
-	0x7d, 0x06, 0x46, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xb9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33,
-	0x41, 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x2d, 0x23, 0xc8, 0xc5, 0x85, 0xde, 0x98, 0xa2,
-	0xde, 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x0d, 0x86, 0x6a, 0x0d, 0x97, 0x78, 0x51, 0xb9,
-	0xd2, 0x29, 0x50, 0xeb, 0x82, 0xc0, 0x11, 0x5b, 0x51, 0x64, 0xbd, 0xe2, 0x65, 0x58, 0x51, 0xb0,
-	0x2f, 0xc1, 0x48, 0xb5, 0x41, 0x48, 0x8b, 0xfb, 0x71, 0xa1, 0xa7, 0xd8, 0xc3, 0x84, 0xb1, 0x96,
-	0x16, 0x63, 0x2d, 0xf5, 0x37, 0x07, 0x63, 0x2a, 0x25, 0xdc, 0xfe, 0xb3, 0x02, 0x94, 0xba, 0x24,
-	0x5f, 0x4b, 0xe8, 0xdb, 0xac, 0x9e, 0xf4, 0x6d, 0x73, 0x30, 0x11, 0xff, 0xd3, 0x45, 0x79, 0xca,
-	0x18, 0xfa, 0xa6, 0x09, 0xc6, 0x49, 0xfc, 0x9e, 0xfd, 0x5a, 0x74, 0x95, 0x5d, 0x5f, 0x57, 0xcf,
-	0x2c, 0x43, 0x55, 0xdf, 0xdf, 0xfb, 0xdb, 0x3b, 0x57, 0xed, 0x6a, 0x7f, 0xbd, 0x00, 0xc7, 0xd4,
-	0x10, 0x7e, 0xef, 0x0e, 0xdc, 0x8d, 0xf4, 0xc0, 0x3d, 0x00, 0xa5, 0xb5, 0x7d, 0x1d, 0x06, 0x78,
-	0xf4, 0xd8, 0x1e, 0x78, 0xfe, 0xc7, 0xcc, 0x40, 0xfe, 0x8a, 0xcd, 0x34, 0x82, 0xf9, 0xff, 0x98,
-	0x05, 0x13, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0xfb, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0xb3,
-	0xd0, 0xb7, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xc5, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0xdf, 0x59,
-	0xd0, 0xbf, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x97, 0xef, 0x42, 0x2f,
-	0xc1, 0x00, 0x59, 0x5f, 0x27, 0xb5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0x31, 0xb0, 0xc4, 0x4a, 0x29,
-	0x13, 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0xb7, 0x60, 0x38, 0x72, 0x9b, 0x64, 0xae, 0x5e,
-	0x17, 0x36, 0x01, 0xf7, 0x11, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0xcb, 0x05, 0x80,
-	0x38, 0x5a, 0x5d, 0xb7, 0x4f, 0x9c, 0x4f, 0x69, 0x8b, 0xcf, 0x65, 0x68, 0x8b, 0x51, 0x4c, 0x30,
-	0x43, 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd3, 0x30, 0xf5, 0x1d, 0x64, 0x98, 0x16, 0x60, 0x2a, 0x8e,
-	0xb6, 0x67, 0x06, 0x1b, 0x65, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0xb3, 0x2a,
-	0xe8, 0x98, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x32, 0x4e, 0xb1, 0x3a, 0xbc, 0x90,
-	0xab, 0x0e, 0xff, 0x45, 0x0b, 0x8e, 0x26, 0xdb, 0x61, 0x7e, 0xf7, 0x5f, 0xb4, 0xe0, 0x58, 0x9c,
-	0x7b, 0x28, 0x6d, 0x82, 0xf0, 0x62, 0xc7, 0x40, 0x6a, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64,
-	0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0x5f, 0x1f, 0x4c, 0xe7, 0x45, 0x60, 0x63, 0x9e, 0x46, 0xce,
-	0xdd, 0xea, 0x16, 0xb9, 0x23, 0xfc, 0x39, 0x62, 0x4f, 0x23, 0x5e, 0x8c, 0x25, 0x3c, 0x99, 0x6e,
-	0xaa, 0xd0, 0x63, 0xba, 0xa9, 0x4d, 0x98, 0xba, 0xb3, 0x49, 0xbc, 0x1b, 0x5e, 0xe8, 0x44, 0x6e,
-	0xb8, 0xee, 0x32, 0x05, 0x3a, 0x5f, 0x37, 0xaf, 0x48, 0xaf, 0x8b, 0x5b, 0x49, 0x84, 0xfd, 0xdd,
-	0xd2, 0x69, 0xa3, 0x20, 0xee, 0x32, 0x3f, 0x48, 0x70, 0x9a, 0x68, 0x3a, 0x5b, 0x57, 0xdf, 0x43,
-	0xce, 0xd6, 0xd5, 0x74, 0x85, 0xd9, 0x8d, 0x74, 0x23, 0x61, 0xcf, 0xd6, 0x15, 0x55, 0x8a, 0x35,
-	0x0c, 0xf4, 0x29, 0x40, 0x7a, 0xba, 0x45, 0x23, 0x00, 0xee, 0x73, 0x7b, 0xbb, 0x25, 0xb4, 0x9a,
-	0x82, 0xee, 0xef, 0x96, 0x8e, 0xd0, 0xd2, 0xb2, 0x47, 0x9f, 0xbf, 0x71, 0xd4, 0xc0, 0x0c, 0x42,
-	0xe8, 0x16, 0x4c, 0xd2, 0x52, 0xb6, 0xa3, 0x64, 0x74, 0x5d, 0xfe, 0x64, 0x7d, 0x66, 0x6f, 0xb7,
-	0x34, 0xb9, 0x9a, 0x80, 0xe5, 0x91, 0x4e, 0x11, 0xc9, 0x48, 0xda, 0x35, 0xd4, 0x6b, 0xd2, 0x2e,
-	0xfb, 0x8b, 0x16, 0x9c, 0xa4, 0x17, 0x5c, 0xfd, 0x5a, 0x8e, 0x16, 0xdd, 0x69, 0xb9, 0x5c, 0x4f,
-	0x23, 0xae, 0x1a, 0x26, 0xab, 0xab, 0x94, 0xb9, 0x96, 0x46, 0x41, 0xe9, 0x09, 0xbf, 0xe5, 0x7a,
-	0xf5, 0xe4, 0x09, 0x7f, 0xd5, 0xf5, 0xea, 0x98, 0x41, 0xd4, 0x95, 0x55, 0xcc, 0x8d, 0xd6, 0xff,
-	0x35, 0xba, 0x57, 0x69, 0x5f, 0xbe, 0xa3, 0xdd, 0x40, 0xcf, 0xe8, 0x3a, 0x55, 0x61, 0x3e, 0x99,
-	0xab, 0x4f, 0xfd, 0x82, 0x05, 0xc2, 0xfb, 0xbd, 0x87, 0x3b, 0xf9, 0x6d, 0x18, 0xdd, 0x4e, 0xa7,
-	0xa2, 0x3d, 0x9b, 0x1f, 0x0e, 0x40, 0x24, 0xa0, 0x55, 0x2c, 0xba, 0x91, 0x76, 0xd6, 0xa0, 0x65,
-	0xd7, 0x41, 0x40, 0x17, 0x09, 0xd3, 0x6a, 0x74, 0xef, 0xcd, 0xf3, 0x00, 0x75, 0x86, 0xcb, 0xf2,
-	0xd3, 0x17, 0x4c, 0x8e, 0x6b, 0x51, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x7a, 0x11, 0x46, 0x64, 0xea,
-	0xd3, 0xb6, 0xd7, 0x8b, 0xec, 0x51, 0x67, 0x9c, 0x0a, 0x5d, 0x19, 0xa7, 0x77, 0x60, 0x2a, 0x20,
-	0xb5, 0x76, 0x10, 0xba, 0xdb, 0x44, 0x82, 0xc5, 0x26, 0x99, 0xe5, 0xc9, 0x22, 0x12, 0xc0, 0x7d,
-	0x16, 0x22, 0x2b, 0x51, 0xc8, 0x94, 0xc6, 0x69, 0x42, 0xe8, 0x02, 0x0c, 0x33, 0xd1, 0x7b, 0x25,
-	0x16, 0x08, 0x2b, 0xc1, 0xd7, 0x8a, 0x04, 0xe0, 0x18, 0x87, 0x3d, 0x0e, 0xda, 0xb7, 0x19, 0x7a,
-	0xc2, 0x13, 0xbc, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x8f, 0xc3, 0x24, 0xaf, 0x17, 0xf8, 0x2d, 0x67,
-	0x83, 0xab, 0x04, 0xfb, 0x55, 0x78, 0x9d, 0xc9, 0x95, 0x04, 0x6c, 0x7f, 0xb7, 0x74, 0x34, 0x59,
-	0xc6, 0xba, 0x9d, 0xa2, 0xc2, 0x2c, 0xff, 0x78, 0x23, 0xf4, 0xce, 0x48, 0x19, 0x0c, 0xc6, 0x20,
-	0xac, 0xe3, 0xd9, 0xff, 0x6a, 0xc1, 0x94, 0x36, 0x55, 0x3d, 0xe7, 0xeb, 0x30, 0x06, 0xa9, 0xd0,
-	0xc3, 0x20, 0x1d, 0x2c, 0xda, 0x43, 0xe6, 0x0c, 0xf7, 0x3d, 0xa0, 0x19, 0xb6, 0x3f, 0x03, 0x28,
-	0x9d, 0x57, 0x17, 0xbd, 0xc9, 0x0d, 0xf9, 0xdd, 0x80, 0xd4, 0x3b, 0x29, 0xfc, 0xf5, 0xc8, 0x39,
-	0xd2, 0x73, 0x95, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xe3, 0x7d, 0x30, 0x99, 0x8c, 0xd5, 0x81, 0xae,
-	0xc0, 0x00, 0xe7, 0xd2, 0x05, 0xf9, 0x0e, 0xf6, 0x64, 0x5a, 0x84, 0x0f, 0x9e, 0x4b, 0x87, 0x73,
-	0xf7, 0xa2, 0x3e, 0x7a, 0x07, 0x46, 0xea, 0xfe, 0x1d, 0xef, 0x8e, 0x13, 0xd4, 0xe7, 0x2a, 0x65,
-	0x71, 0x42, 0x64, 0x0a, 0xa0, 0x16, 0x63, 0x34, 0x3d, 0x6a, 0x08, 0xb3, 0x9d, 0x88, 0x41, 0x58,
-	0x27, 0x87, 0xd6, 0x58, 0x7a, 0xa7, 0x75, 0x77, 0x63, 0xc5, 0x69, 0x75, 0xf2, 0xea, 0x5a, 0x90,
-	0x48, 0x1a, 0xe5, 0x31, 0x91, 0x03, 0x8a, 0x03, 0x70, 0x4c, 0x08, 0x7d, 0x0e, 0x8e, 0x84, 0x39,
-	0x2a, 0xb1, 0xbc, 0x34, 0xeb, 0x9d, 0xb4, 0x44, 0x5c, 0x98, 0x92, 0xa5, 0x3c, 0xcb, 0x6a, 0x06,
-	0xdd, 0x05, 0x24, 0x44, 0xcf, 0x6b, 0x41, 0x3b, 0x8c, 0xe6, 0xdb, 0x5e, 0xbd, 0x21, 0xd3, 0x3f,
-	0x7d, 0x38, 0x5b, 0x4e, 0x90, 0xc4, 0xd6, 0xda, 0x66, 0xe1, 0x85, 0xd3, 0x18, 0x38, 0xa3, 0x0d,
-	0xfb, 0x0b, 0x7d, 0x30, 0x23, 0x13, 0x59, 0x67, 0x78, 0xaf, 0x7c, 0xde, 0x4a, 0xb8, 0xaf, 0xbc,
-	0x92, 0x7f, 0xd0, 0x3f, 0x34, 0x27, 0x96, 0x2f, 0xa5, 0x9d, 0x58, 0x5e, 0x3b, 0x60, 0x37, 0x1e,
-	0x98, 0x2b, 0xcb, 0xf7, 0xac, 0xff, 0xc9, 0xde, 0x51, 0x30, 0xae, 0x66, 0x84, 0x79, 0xec, 0xf6,
-	0x8a, 0x54, 0x1d, 0xe5, 0x3c, 0xff, 0xaf, 0x08, 0x1c, 0xe3, 0xb2, 0x1f, 0x95, 0x11, 0xde, 0xd9,
-	0x39, 0xab, 0xe8, 0x50, 0x9a, 0xa4, 0xd9, 0x8a, 0x76, 0x16, 0xdd, 0x40, 0xf4, 0x38, 0x93, 0xe6,
-	0x92, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0x6d, 0x98, 0xda, 0x60, 0x11, 0x9f, 0xb4,
-	0x9c, 0xd2, 0xe2, 0x5c, 0xc8, 0xdc, 0xb7, 0x97, 0x17, 0x96, 0xf2, 0x13, 0x50, 0xf3, 0xc7, 0x5f,
-	0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa8, 0x73, 0x27, 0x5c, 0x6a, 0x38, 0x61, 0xe4, 0xd6,
-	0xe6, 0x1b, 0x7e, 0x6d, 0xab, 0x1a, 0xf9, 0x81, 0x4c, 0x16, 0x99, 0xf9, 0xf6, 0x9a, 0xbb, 0x55,
-	0x4d, 0xe1, 0x1b, 0xcd, 0x4f, 0xef, 0xed, 0x96, 0x8e, 0x66, 0x61, 0xe1, 0xcc, 0xb6, 0xd0, 0x2a,
-	0x0c, 0x6e, 0xb8, 0x11, 0x26, 0x2d, 0x5f, 0x9c, 0x16, 0x99, 0x47, 0xe1, 0x65, 0x8e, 0x62, 0xb4,
-	0xc4, 0x22, 0x52, 0x09, 0x00, 0x96, 0x44, 0xd0, 0x9b, 0xea, 0x12, 0x18, 0xc8, 0x17, 0xc0, 0xa6,
-	0x6d, 0xef, 0x32, 0xaf, 0x81, 0xd7, 0xa1, 0xe8, 0xad, 0x87, 0x9d, 0x62, 0xf1, 0xac, 0x2e, 0x1b,
-	0xf2, 0xb3, 0xf9, 0x41, 0xfa, 0x34, 0x5e, 0x5d, 0xae, 0x62, 0x5a, 0x91, 0xb9, 0xbd, 0x86, 0xb5,
-	0xd0, 0x15, 0x89, 0xa7, 0x32, 0xbd, 0x80, 0xcb, 0xd5, 0x85, 0x6a, 0xd9, 0xa0, 0xc1, 0xa2, 0x1a,
-	0xb2, 0x62, 0xcc, 0xab, 0xa3, 0x9b, 0x30, 0xbc, 0xc1, 0x0f, 0xbe, 0xf5, 0x50, 0x24, 0xb3, 0xcf,
-	0xbc, 0x8c, 0x2e, 0x4b, 0x24, 0x83, 0x1e, 0xbb, 0x32, 0x14, 0x08, 0xc7, 0xa4, 0xd0, 0x17, 0x2c,
-	0x38, 0xd6, 0x4a, 0x48, 0x50, 0x99, 0xb3, 0x9a, 0x30, 0x53, 0xcb, 0x74, 0x00, 0xa8, 0x64, 0x55,
-	0x30, 0x1a, 0x64, 0xea, 0x97, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0xe0, 0x76, 0xbd, 0x53,
-	0xae, 0xa2, 0x44, 0x60, 0x22, 0x3e, 0xd0, 0x78, 0x7e, 0x11, 0xd3, 0x8a, 0x68, 0x0d, 0x60, 0xbd,
-	0x41, 0x44, 0xc4, 0x47, 0x61, 0x14, 0x95, 0x79, 0xfb, 0x2f, 0x2b, 0x2c, 0x41, 0x87, 0xbd, 0x44,
-	0xe3, 0x52, 0xac, 0xd1, 0xa1, 0x4b, 0xa9, 0xe6, 0x7a, 0x75, 0x12, 0x30, 0xe5, 0x56, 0xce, 0x52,
-	0x5a, 0x60, 0x18, 0xe9, 0xa5, 0xc4, 0xcb, 0xb1, 0xa0, 0xc0, 0x68, 0x91, 0xd6, 0xe6, 0x7a, 0xd8,
-	0x29, 0x2b, 0xc6, 0x02, 0x69, 0x6d, 0x26, 0x16, 0x14, 0xa7, 0xc5, 0xca, 0xb1, 0xa0, 0x40, 0xb7,
-	0xcc, 0x3a, 0xdd, 0x40, 0x24, 0x98, 0x9e, 0xc8, 0xdf, 0x32, 0xcb, 0x1c, 0x25, 0xbd, 0x65, 0x04,
-	0x00, 0x4b, 0x22, 0xe8, 0xd3, 0x26, 0xb7, 0x33, 0xc9, 0x68, 0x3e, 0xd3, 0x85, 0xdb, 0x31, 0xe8,
-	0x76, 0xe6, 0x77, 0x5e, 0x81, 0xc2, 0x7a, 0x8d, 0x29, 0xc5, 0x72, 0x74, 0x06, 0xcb, 0x0b, 0x06,
-	0x35, 0x16, 0x65, 0x7e, 0x79, 0x01, 0x17, 0xd6, 0x6b, 0x74, 0xe9, 0x3b, 0xf7, 0xda, 0x01, 0x59,
-	0x76, 0x1b, 0x44, 0x64, 0xc8, 0xc8, 0x5c, 0xfa, 0x73, 0x12, 0x29, 0xbd, 0xf4, 0x15, 0x08, 0xc7,
-	0xa4, 0x28, 0xdd, 0x98, 0x07, 0x3b, 0x92, 0x4f, 0x57, 0xb1, 0x5a, 0x69, 0xba, 0x99, 0x5c, 0xd8,
-	0x16, 0x8c, 0x6d, 0x87, 0xad, 0x4d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, 0x89, 0x54, 0x71, 0x53,
-	0x20, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, 0xea, 0xc4, 0xb0, 0x49,
-	0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, 0x19, 0x11, 0xe7, 0xf8,
-	0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0xf1, 0x2e, 0x83, 0x9d, 0xea, 0x6f,
-	0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb5, 0xe9, 0x47, 0xbe, 0x97, 0xb8, 0xe4,
-	0x4e, 0xe4, 0x5f, 0x34, 0x95, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, 0x33, 0xdb, 0xa2, 0x1f,
-	0xd7, 0x92, 0x91, 0x01, 0x45, 0x16, 0x8f, 0xa7, 0x72, 0x02, 0x6b, 0xa6, 0xc3, 0x07, 0xf2, 0x8f,
-	0x53, 0x20, 0x1c, 0x93, 0x42, 0x75, 0x18, 0x6f, 0x19, 0x11, 0x67, 0x59, 0x36, 0x92, 0x1c, 0xbe,
-	0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, 0xf7, 0xb8, 0xab, 0x1f,
-	0x4b, 0x56, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, 0x4b, 0x22, 0x74, 0x34,
-	0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x9c, 0x3f, 0x79, 0x0a, 0xf6, 0x2c, 0x35, 0x91, 0x0c, 0xb3, 0x2e,
-	0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xca, 0x3f, 0xc9, 0x93, 0xd7, 0x1d, 0x3b,
-	0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0xf2, 0x95, 0xe4, 0xf4, 0x4b, 0x85,
-	0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, 0xdd, 0x99, 0x0e, 0x57,
-	0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, 0x4c, 0xe7, 0x7d, 0x1b,
-	0xeb, 0xd0, 0x2a, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, 0x56, 0xcf, 0x01, 0x87,
-	0x2f, 0xc3, 0x94, 0x72, 0x47, 0x6c, 0xb8, 0xb5, 0x1d, 0x2d, 0x49, 0xa9, 0x0a, 0xcd, 0x53, 0x4d,
-	0x22, 0xe0, 0x74, 0x1d, 0x34, 0x07, 0x13, 0x46, 0x61, 0x79, 0x51, 0x3c, 0xff, 0xe3, 0x4c, 0x1b,
-	0x26, 0x18, 0x27, 0xf1, 0xed, 0xdf, 0xb0, 0xe0, 0x44, 0x4e, 0xfe, 0xfb, 0x9e, 0xe3, 0xe9, 0xae,
-	0xc3, 0x44, 0xcb, 0xac, 0xda, 0x25, 0x04, 0xb8, 0x91, 0x65, 0x5f, 0xf5, 0x35, 0x01, 0xc0, 0x49,
-	0xa2, 0xf6, 0xaf, 0x15, 0xe0, 0x74, 0x47, 0xfb, 0x7a, 0x84, 0xe1, 0xf8, 0x46, 0x33, 0x74, 0x16,
-	0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x8b, 0xd4, 0x34, 0x2d, 0x28, 0x33, 0x54, 0xbf,
-	0xbc, 0x52, 0x9d, 0x4b, 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x19, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x3d,
-	0x71, 0xd3, 0xf4, 0x70, 0x46, 0x0d, 0xf4, 0x32, 0x8c, 0x29, 0xbb, 0x7d, 0x6d, 0xc6, 0xd9, 0x05,
-	0x81, 0x75, 0x00, 0x36, 0xf1, 0xd0, 0x45, 0x9e, 0x82, 0x49, 0x24, 0xeb, 0x12, 0x2a, 0xd3, 0x09,
-	0x99, 0x5f, 0x49, 0x14, 0x63, 0x1d, 0x67, 0xfe, 0xd2, 0x5f, 0x7c, 0xeb, 0xcc, 0x87, 0xfe, 0xea,
-	0x5b, 0x67, 0x3e, 0xf4, 0xb7, 0xdf, 0x3a, 0xf3, 0xa1, 0x1f, 0xda, 0x3b, 0x63, 0xfd, 0xc5, 0xde,
-	0x19, 0xeb, 0xaf, 0xf6, 0xce, 0x58, 0x7f, 0xbb, 0x77, 0xc6, 0xfa, 0xdf, 0xf7, 0xce, 0x58, 0x5f,
-	0xfe, 0x3f, 0xce, 0x7c, 0xe8, 0x6d, 0x14, 0x47, 0xa8, 0xbe, 0x40, 0x67, 0xe7, 0xc2, 0xf6, 0xc5,
-	0xff, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf5, 0xf1, 0x8c, 0x4c, 0x2d, 0x26, 0x01, 0x00,
+	// 16665 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x5b, 0x90, 0x5c, 0x49,
+	0x76, 0x18, 0xb6, 0xb7, 0xaa, 0x9f, 0xa7, 0xdf, 0x89, 0x57, 0xa1, 0x07, 0x40, 0x61, 0xee, 0xcc,
+	0x60, 0x30, 0x3b, 0x33, 0x8d, 0xc5, 0x3c, 0x76, 0xb1, 0x33, 0xb3, 0xc3, 0xe9, 0x27, 0xd0, 0x03,
+	0x74, 0xa3, 0x26, 0xab, 0x01, 0xec, 0x63, 0x76, 0xb5, 0x17, 0x55, 0xd9, 0xdd, 0x77, 0xbb, 0xea,
+	0xde, 0x9a, 0x7b, 0x6f, 0x35, 0xd0, 0x30, 0x15, 0xa4, 0x56, 0xe6, 0x4a, 0x4b, 0xd2, 0x11, 0x1b,
+	0x0a, 0x4b, 0x72, 0x90, 0x0a, 0x7e, 0xe8, 0x45, 0xd2, 0xb4, 0x64, 0x52, 0xa4, 0x45, 0x59, 0x14,
+	0x29, 0xda, 0x96, 0x23, 0x68, 0x7f, 0xc8, 0x14, 0x23, 0xcc, 0x65, 0x58, 0xe1, 0x96, 0xd9, 0xb6,
+	0x42, 0xc1, 0x0f, 0x53, 0x0a, 0xda, 0x1f, 0x76, 0x87, 0x6c, 0x2a, 0xf2, 0x79, 0x33, 0xef, 0xab,
+	0xaa, 0x31, 0x40, 0xef, 0x70, 0x63, 0xfe, 0xaa, 0xf2, 0x9c, 0x3c, 0x99, 0x37, 0x1f, 0x27, 0x4f,
+	0x9e, 0x73, 0xf2, 0x1c, 0xb0, 0x77, 0xae, 0x85, 0x73, 0xae, 0x7f, 0xc5, 0xe9, 0xb8, 0x57, 0x1a,
+	0x7e, 0x40, 0xae, 0xec, 0x5e, 0xbd, 0xb2, 0x45, 0x3c, 0x12, 0x38, 0x11, 0x69, 0xce, 0x75, 0x02,
+	0x3f, 0xf2, 0x11, 0xe2, 0x38, 0x73, 0x4e, 0xc7, 0x9d, 0xa3, 0x38, 0x73, 0xbb, 0x57, 0x67, 0x5f,
+	0xdd, 0x72, 0xa3, 0xed, 0xee, 0xfd, 0xb9, 0x86, 0xdf, 0xbe, 0xb2, 0xe5, 0x6f, 0xf9, 0x57, 0x18,
+	0xea, 0xfd, 0xee, 0x26, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xec, 0x1b, 0x71, 0x33, 0x6d,
+	0xa7, 0xb1, 0xed, 0x7a, 0x24, 0xd8, 0xbb, 0xd2, 0xd9, 0xd9, 0x62, 0xed, 0x06, 0x24, 0xf4, 0xbb,
+	0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, 0x5e, 0x69, 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0x95,
+	0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, 0xba, 0x99, 0xcf, 0xf7, 0xaa, 0x10, 0x36, 0xb6, 0x49,
+	0xdb, 0x49, 0xd5, 0x7b, 0x3d, 0xaf, 0x5e, 0x37, 0x72, 0x5b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20,
+	0x59, 0xc9, 0xfe, 0xbe, 0x05, 0x17, 0xe7, 0xef, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0x85,
+	0x96, 0xdf, 0xd8, 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0,
+	0x57, 0x60, 0x64, 0x97, 0xfd, 0x5f, 0x5d, 0xaa, 0x58, 0x17, 0xad, 0xcb, 0xa3, 0x0b, 0xd3, 0xbf,
+	0xb3, 0x5f, 0xfd, 0xcc, 0xc1, 0x7e, 0x75, 0xe4, 0xae, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x82, 0xa1,
+	0xcd, 0x70, 0x63, 0xaf, 0x43, 0x2a, 0x25, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x56, 0xea, 0xb4, 0x14,
+	0x0b, 0x28, 0xba, 0x02, 0xa3, 0x1d, 0x27, 0x88, 0xdc, 0xc8, 0xf5, 0xbd, 0x4a, 0xf9, 0xa2, 0x75,
+	0x79, 0x70, 0x61, 0x46, 0xa0, 0x8e, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xb4, 0x1b, 0x01, 0x71, 0x9a,
+	0xb7, 0xbd, 0xd6, 0x5e, 0x65, 0xe0, 0xa2, 0x75, 0x79, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61,
+	0xd8, 0x3f, 0x53, 0x82, 0x91, 0xf9, 0xcd, 0x4d, 0xd7, 0x73, 0xa3, 0x3d, 0x74, 0x17, 0xc6, 0x3d,
+	0xbf, 0x49, 0xe4, 0x7f, 0xf6, 0x15, 0x63, 0xaf, 0x5d, 0x9c, 0x4b, 0x2f, 0xa5, 0xb9, 0x75, 0x0d,
+	0x6f, 0x61, 0xfa, 0x60, 0xbf, 0x3a, 0xae, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd6, 0xf1, 0x9b,
+	0x8a, 0x6c, 0x89, 0x91, 0xad, 0x66, 0x91, 0xad, 0xc5, 0x68, 0x0b, 0x53, 0x07, 0xfb, 0xd5, 0x31,
+	0xad, 0x00, 0xeb, 0x44, 0xd0, 0x7d, 0x98, 0xa2, 0x7f, 0xbd, 0xc8, 0x55, 0x74, 0xcb, 0x8c, 0xee,
+	0x73, 0x79, 0x74, 0x35, 0xd4, 0x85, 0x13, 0x07, 0xfb, 0xd5, 0xa9, 0x44, 0x21, 0x4e, 0x12, 0xb4,
+	0x7f, 0xda, 0x82, 0xa9, 0xf9, 0x4e, 0x67, 0x3e, 0x68, 0xfb, 0x41, 0x2d, 0xf0, 0x37, 0xdd, 0x16,
+	0x41, 0x5f, 0x80, 0x81, 0x88, 0xce, 0x1a, 0x9f, 0xe1, 0xe7, 0xc4, 0xd0, 0x0e, 0xd0, 0xb9, 0x3a,
+	0xdc, 0xaf, 0x9e, 0x48, 0xa0, 0xb3, 0xa9, 0x64, 0x15, 0xd0, 0x7b, 0x30, 0xdd, 0xf2, 0x1b, 0x4e,
+	0x6b, 0xdb, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f, 0xf2, 0x60, 0xbf, 0x3a, 0x7d, 0x2b, 0x01, 0xc3,
+	0x29, 0x6c, 0xfb, 0x11, 0x4c, 0xce, 0x47, 0x91, 0xd3, 0xd8, 0x26, 0x4d, 0xbe, 0xa0, 0xd0, 0x1b,
+	0x30, 0xe0, 0x39, 0x6d, 0xd9, 0x99, 0x8b, 0xb2, 0x33, 0xeb, 0x4e, 0x9b, 0x76, 0x66, 0xfa, 0x8e,
+	0xe7, 0x7e, 0xd4, 0x15, 0x8b, 0x94, 0x96, 0x61, 0x86, 0x8d, 0x5e, 0x03, 0x68, 0x92, 0x5d, 0xb7,
+	0x41, 0x6a, 0x4e, 0xb4, 0x2d, 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x52, 0x10, 0xac, 0x61, 0xd9, 0x0f,
+	0x61, 0x74, 0x7e, 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x19, 0xa2, 0x1d, 0x98, 0xea, 0x04, 0x64, 0x93,
+	0x04, 0xaa, 0xa8, 0x62, 0x5d, 0x2c, 0x5f, 0x1e, 0x7b, 0xed, 0x72, 0xe6, 0xd8, 0x9b, 0xa8, 0xcb,
+	0x5e, 0x14, 0xec, 0x2d, 0x9c, 0x11, 0xed, 0x4d, 0x25, 0xa0, 0x38, 0x49, 0xd9, 0xfe, 0x67, 0x25,
+	0x38, 0x35, 0xff, 0xa8, 0x1b, 0x90, 0x25, 0x37, 0xdc, 0x49, 0x6e, 0xb8, 0xa6, 0x1b, 0xee, 0xac,
+	0xc7, 0x23, 0xa0, 0x56, 0xfa, 0x92, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0a, 0xc3, 0xf4, 0xf7, 0x1d,
+	0xbc, 0x2a, 0x3e, 0xf9, 0x84, 0x40, 0x1e, 0x5b, 0x72, 0x22, 0x67, 0x89, 0x83, 0xb0, 0xc4, 0x41,
+	0x6b, 0x30, 0xd6, 0x60, 0xfc, 0x61, 0x6b, 0xcd, 0x6f, 0x12, 0xb6, 0xb6, 0x46, 0x17, 0x5e, 0xa6,
+	0xe8, 0x8b, 0x71, 0xf1, 0xe1, 0x7e, 0xb5, 0xc2, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47,
+	0xb6, 0xda, 0xee, 0x03, 0x8c, 0x12, 0x64, 0x6c, 0xf5, 0xcb, 0xda, 0xce, 0x1d, 0x64, 0x3b, 0x77,
+	0x3c, 0x7b, 0xd7, 0xa2, 0xab, 0x30, 0xb0, 0xe3, 0x7a, 0xcd, 0xca, 0x10, 0xa3, 0x75, 0x9e, 0xce,
+	0xf9, 0x4d, 0xd7, 0x6b, 0x1e, 0xee, 0x57, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0xff,
+	0xcb, 0x82, 0x2a, 0x83, 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63,
+	0x40, 0x5f, 0x03, 0x08, 0x49, 0x23, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0xd4, 0x15, 0x04, 0x6b,
+	0x58, 0x94, 0x3f, 0x85, 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x9f, 0xea, 0x12, 0x80,
+	0x63, 0x1c, 0x83, 0x3f, 0x95, 0x7b, 0xf1, 0x27, 0xf4, 0x25, 0x98, 0x8a, 0x1b, 0x0b, 0x3b, 0x4e,
+	0x43, 0x0e, 0x20, 0xdb, 0xc1, 0x75, 0x13, 0x84, 0x93, 0xb8, 0xf6, 0x7f, 0x6e, 0x89, 0xc5, 0x43,
+	0xbf, 0xfa, 0x13, 0xfe, 0xad, 0xf6, 0x3f, 0xb2, 0x60, 0x78, 0xc1, 0xf5, 0x9a, 0xae, 0xb7, 0x85,
+	0xbe, 0x09, 0x23, 0xf4, 0xa8, 0x6c, 0x3a, 0x91, 0x23, 0xd8, 0xf0, 0xe7, 0xb4, 0xbd, 0xa5, 0x4e,
+	0xae, 0xb9, 0xce, 0xce, 0x16, 0x2d, 0x08, 0xe7, 0x28, 0x36, 0xdd, 0x6d, 0xb7, 0xef, 0x7f, 0x8b,
+	0x34, 0xa2, 0x35, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x28, 0x72,
+	0x82, 0x2d, 0x12, 0x09, 0x7e, 0x9c, 0xc9, 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1a, 0x24,
+	0x3e, 0xa5, 0x36, 0x58, 0x55, 0x2c, 0x48, 0xd8, 0xff, 0xdf, 0x30, 0x9c, 0x5d, 0xac, 0xaf, 0xe6,
+	0xac, 0xab, 0x4b, 0x30, 0xd4, 0x0c, 0xdc, 0x5d, 0x12, 0x88, 0x71, 0x56, 0x54, 0x96, 0x58, 0x29,
+	0x16, 0x50, 0x74, 0x0d, 0xc6, 0xf9, 0xf9, 0x78, 0xc3, 0xf1, 0x9a, 0x31, 0x7b, 0x14, 0xd8, 0xe3,
+	0x77, 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x25, 0x36, 0x63, 0xde, 0xd9, 0xfb, 0x5d,
+	0x0b, 0xa6, 0x79, 0x33, 0xf3, 0x51, 0x14, 0xb8, 0xf7, 0xbb, 0x11, 0x09, 0x2b, 0x83, 0x8c, 0xd3,
+	0x2d, 0x66, 0x8d, 0x56, 0xee, 0x08, 0xcc, 0xdd, 0x4d, 0x50, 0xe1, 0x4c, 0xb0, 0x22, 0xda, 0x9d,
+	0x4e, 0x82, 0x71, 0xaa, 0x59, 0xf4, 0x17, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16,
+	0x09, 0x6a, 0xdd, 0xfb, 0x2d, 0x37, 0xdc, 0xe6, 0xeb, 0x14, 0x93, 0x4d, 0xc6, 0x09, 0x72, 0xe6,
+	0x50, 0x21, 0x89, 0x39, 0xbc, 0x70, 0xb0, 0x5f, 0x9d, 0x5d, 0xcc, 0x25, 0x85, 0x0b, 0x9a, 0x41,
+	0x3b, 0x80, 0xe8, 0xc9, 0x5e, 0x8f, 0x9c, 0x2d, 0x12, 0x37, 0x3e, 0xdc, 0x7f, 0xe3, 0xa7, 0x0f,
+	0xf6, 0xab, 0x68, 0x3d, 0x45, 0x02, 0x67, 0x90, 0x45, 0x1f, 0xc1, 0x49, 0x5a, 0x9a, 0xfa, 0xd6,
+	0x91, 0xfe, 0x9b, 0xab, 0x1c, 0xec, 0x57, 0x4f, 0xae, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0x8f,
+	0x5b, 0x70, 0x36, 0xfe, 0xfc, 0xe5, 0x87, 0x1d, 0xc7, 0x6b, 0xc6, 0x0d, 0x8f, 0xf6, 0xdf, 0x30,
+	0xe5, 0xc9, 0x67, 0x17, 0xf3, 0x28, 0xe1, 0xfc, 0x46, 0x90, 0x07, 0x27, 0x68, 0xd7, 0x92, 0x6d,
+	0x43, 0xff, 0x6d, 0x9f, 0x39, 0xd8, 0xaf, 0x9e, 0x58, 0x4f, 0xd3, 0xc0, 0x59, 0x84, 0x67, 0x17,
+	0xe1, 0x54, 0xe6, 0xea, 0x44, 0xd3, 0x50, 0xde, 0x21, 0x5c, 0x08, 0x1c, 0xc5, 0xf4, 0x27, 0x3a,
+	0x09, 0x83, 0xbb, 0x4e, 0xab, 0x2b, 0x36, 0x26, 0xe6, 0x7f, 0xde, 0x2a, 0x5d, 0xb3, 0xec, 0xff,
+	0xbe, 0x0c, 0x53, 0x8b, 0xf5, 0xd5, 0xc7, 0xda, 0xf5, 0xfa, 0xb1, 0x57, 0x2a, 0x3c, 0xf6, 0xe2,
+	0x43, 0xb4, 0x9c, 0x7b, 0x88, 0xfe, 0x58, 0xc6, 0x96, 0x1d, 0x60, 0x5b, 0xf6, 0x8b, 0x39, 0x5b,
+	0xf6, 0x09, 0x6f, 0xd4, 0xdd, 0x9c, 0x55, 0x3b, 0xc8, 0x26, 0x30, 0x53, 0x42, 0x62, 0xb2, 0x5f,
+	0x92, 0xd5, 0x1e, 0x71, 0xe9, 0x3e, 0x99, 0x79, 0x6c, 0xc0, 0xf8, 0xa2, 0xd3, 0x71, 0xee, 0xbb,
+	0x2d, 0x37, 0x72, 0x49, 0x88, 0x5e, 0x84, 0xb2, 0xd3, 0x6c, 0x32, 0xe9, 0x6e, 0x74, 0xe1, 0xd4,
+	0xc1, 0x7e, 0xb5, 0x3c, 0xdf, 0xa4, 0x62, 0x06, 0x28, 0xac, 0x3d, 0x4c, 0x31, 0xd0, 0x67, 0x61,
+	0xa0, 0x19, 0xf8, 0x9d, 0x4a, 0x89, 0x61, 0xd2, 0x5d, 0x3e, 0xb0, 0x14, 0xf8, 0x9d, 0x04, 0x2a,
+	0xc3, 0xb1, 0x7f, 0xbb, 0x04, 0xe7, 0x16, 0x49, 0x67, 0x7b, 0xa5, 0x9e, 0x73, 0x5e, 0x5c, 0x86,
+	0x91, 0xb6, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x69, 0xb6, 0x22, 0xd6, 0x44, 0x19, 0x56, 0x50,
+	0x74, 0x11, 0x06, 0x3a, 0xb1, 0x10, 0x3b, 0x2e, 0x05, 0x60, 0x26, 0xbe, 0x32, 0x08, 0xc5, 0xe8,
+	0x86, 0x24, 0x10, 0x2b, 0x46, 0x61, 0xdc, 0x09, 0x49, 0x80, 0x19, 0x24, 0x96, 0x04, 0xa8, 0x8c,
+	0x20, 0x4e, 0x84, 0x84, 0x24, 0x40, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x18, 0x0d, 0x13, 0x33, 0xdb,
+	0xd7, 0xd6, 0x9c, 0x60, 0xa2, 0x82, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0x82, 0x0d, 0xf5, 0x14, 0x15,
+	0x7e, 0xa3, 0x04, 0x88, 0x0f, 0xe1, 0x9f, 0xb1, 0x81, 0xbb, 0x93, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4,
+	0x93, 0x1a, 0xbd, 0xff, 0xdb, 0x82, 0x73, 0x8b, 0xae, 0xd7, 0x24, 0x41, 0xce, 0x02, 0x7c, 0x3a,
+	0x57, 0xf9, 0xa3, 0x09, 0x29, 0xc6, 0x12, 0x1b, 0x78, 0x02, 0x4b, 0xcc, 0xfe, 0xb7, 0x16, 0x20,
+	0xfe, 0xd9, 0x9f, 0xb8, 0x8f, 0xbd, 0x93, 0xfe, 0xd8, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb9,
+	0xd8, 0x72, 0x89, 0x17, 0xad, 0xd6, 0x16, 0x7d, 0x6f, 0xd3, 0xdd, 0x42, 0x6f, 0xc1, 0x64, 0xe4,
+	0xb6, 0x89, 0xdf, 0x8d, 0xea, 0xa4, 0xe1, 0x7b, 0xec, 0xe6, 0x6a, 0x5d, 0x1e, 0x5c, 0x40, 0x07,
+	0xfb, 0xd5, 0xc9, 0x0d, 0x03, 0x82, 0x13, 0x98, 0xf6, 0xdf, 0xa5, 0x7c, 0xab, 0xd5, 0x0d, 0x23,
+	0x12, 0x6c, 0x04, 0xdd, 0x30, 0x5a, 0xe8, 0x52, 0xd9, 0xb3, 0x16, 0xf8, 0xb4, 0x3b, 0xae, 0xef,
+	0xa1, 0x73, 0xc6, 0x75, 0x7c, 0x44, 0x5e, 0xc5, 0xc5, 0xb5, 0x7b, 0x0e, 0x20, 0x74, 0xb7, 0x3c,
+	0x12, 0x68, 0xd7, 0x87, 0x49, 0xb6, 0x55, 0x54, 0x29, 0xd6, 0x30, 0x50, 0x0b, 0x26, 0x5a, 0xce,
+	0x7d, 0xd2, 0xaa, 0x93, 0x16, 0x69, 0x44, 0x7e, 0x20, 0xf4, 0x1b, 0xaf, 0xf7, 0x77, 0x0f, 0xb8,
+	0xa5, 0x57, 0x5d, 0x98, 0x39, 0xd8, 0xaf, 0x4e, 0x18, 0x45, 0xd8, 0x24, 0x4e, 0x59, 0x87, 0xdf,
+	0xa1, 0x5f, 0xe1, 0xb4, 0xf4, 0xcb, 0xe7, 0x6d, 0x51, 0x86, 0x15, 0x54, 0xb1, 0x8e, 0x81, 0x3c,
+	0xd6, 0x61, 0xff, 0x4b, 0xba, 0xd0, 0xfc, 0x76, 0xc7, 0xf7, 0x88, 0x17, 0x2d, 0xfa, 0x5e, 0x93,
+	0x6b, 0xa6, 0xde, 0x32, 0x54, 0x27, 0x97, 0x12, 0xaa, 0x93, 0xd3, 0xe9, 0x1a, 0x9a, 0xf6, 0xe4,
+	0x8b, 0x30, 0x14, 0x46, 0x4e, 0xd4, 0x0d, 0xc5, 0xc0, 0x3d, 0x2b, 0x97, 0x5d, 0x9d, 0x95, 0x1e,
+	0xee, 0x57, 0xa7, 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x12, 0x0c, 0xb7, 0x49, 0x18, 0x3a,
+	0x5b, 0x52, 0x6c, 0x98, 0x12, 0x75, 0x87, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x1c, 0x0c, 0x92,
+	0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x13, 0x02, 0x71, 0x70, 0x99, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x27,
+	0x0b, 0xa6, 0x54, 0x5f, 0x79, 0x5b, 0xc7, 0x70, 0x5d, 0xfb, 0x2a, 0x40, 0x43, 0x7e, 0x60, 0xc8,
+	0x8e, 0xd9, 0xb1, 0xd7, 0x2e, 0x65, 0x4a, 0x34, 0xa9, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b,
+	0xd4, 0xec, 0xdf, 0xb4, 0xe0, 0x44, 0xe2, 0x8b, 0x6e, 0xb9, 0x61, 0x84, 0x3e, 0x4c, 0x7d, 0xd5,
+	0x5c, 0x9f, 0x8b, 0xcf, 0x0d, 0xf9, 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0x83,
+	0x6e, 0x44, 0xda, 0xf2, 0x63, 0x9e, 0x2b, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0x56, 0x69, 0x4d,
+	0xcc, 0x09, 0xd8, 0xbf, 0x5d, 0x86, 0x51, 0xbe, 0xbf, 0xd7, 0x9c, 0xce, 0x31, 0xcc, 0xc5, 0xcb,
+	0x30, 0xea, 0xb6, 0xdb, 0xdd, 0xc8, 0xb9, 0x2f, 0xce, 0xbd, 0x11, 0xce, 0x83, 0x56, 0x65, 0x21,
+	0x8e, 0xe1, 0x68, 0x15, 0x06, 0x58, 0x57, 0xf8, 0x57, 0xbe, 0x98, 0xfd, 0x95, 0xa2, 0xef, 0x73,
+	0x4b, 0x4e, 0xe4, 0x70, 0x91, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x77,
+	0x3d, 0x27, 0xd8, 0xa3, 0x65, 0x95, 0x32, 0x23, 0xf8, 0x6a, 0x31, 0xc1, 0x05, 0x85, 0xcf, 0xc9,
+	0xaa, 0x0f, 0x8b, 0x01, 0x58, 0x23, 0x3a, 0xfb, 0x05, 0x18, 0x55, 0xc8, 0x47, 0x91, 0x1c, 0x67,
+	0xbf, 0x04, 0x53, 0x89, 0xb6, 0x7a, 0x55, 0x1f, 0xd7, 0x05, 0xcf, 0x7f, 0xcc, 0x58, 0x86, 0xe8,
+	0xf5, 0xb2, 0xb7, 0x2b, 0xce, 0xa6, 0x47, 0x70, 0xb2, 0x95, 0xc1, 0xf2, 0xc5, 0xbc, 0xf6, 0x7f,
+	0x44, 0x9c, 0x13, 0x9f, 0x7d, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x30, 0x38, 0x62, 0xa9, 0x88, 0x23,
+	0x52, 0x7e, 0x77, 0x52, 0x75, 0xfe, 0x26, 0xd9, 0x53, 0x4c, 0xf5, 0x07, 0xd9, 0xfd, 0xf3, 0x7c,
+	0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0xca, 0x37, 0xc9, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xca, 0x85,
+	0x5f, 0xf7, 0x2b, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x06, 0xbe, 0xb0, 0x60, 0xf2, 0x85, 0xf3, 0x85,
+	0x0b, 0x3c, 0x87, 0x23, 0xfc, 0x46, 0x09, 0xce, 0x2a, 0x1c, 0x7a, 0x89, 0xe2, 0x7f, 0xc4, 0xaa,
+	0xba, 0x02, 0xa3, 0x9e, 0x52, 0x27, 0x5a, 0xa6, 0x1e, 0x2f, 0x56, 0x26, 0xc6, 0x38, 0xf4, 0xc8,
+	0xf3, 0xe2, 0x43, 0x7b, 0x5c, 0xd7, 0xb3, 0x8b, 0xc3, 0x7d, 0x01, 0xca, 0x5d, 0xb7, 0x29, 0x0e,
+	0x98, 0xcf, 0xc9, 0xd1, 0xbe, 0xb3, 0xba, 0x74, 0xb8, 0x5f, 0x7d, 0x36, 0xcf, 0xe4, 0x44, 0x4f,
+	0xb6, 0x70, 0xee, 0xce, 0xea, 0x12, 0xa6, 0x95, 0xd1, 0x3c, 0x4c, 0x49, 0xab, 0xda, 0x5d, 0x2a,
+	0x97, 0xfa, 0x9e, 0x38, 0x87, 0x94, 0xb2, 0x1c, 0x9b, 0x60, 0x9c, 0xc4, 0x47, 0x4b, 0x30, 0xbd,
+	0xd3, 0xbd, 0x4f, 0x5a, 0x24, 0xe2, 0x1f, 0x7c, 0x93, 0x70, 0x55, 0xf2, 0x68, 0x7c, 0x85, 0xbd,
+	0x99, 0x80, 0xe3, 0x54, 0x0d, 0xfb, 0x4f, 0xd9, 0x79, 0x20, 0x46, 0x4f, 0x93, 0x6f, 0x7e, 0x90,
+	0xcb, 0xb9, 0x9f, 0x55, 0x71, 0x93, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x5e, 0x15, 0xc6, 0x9a,
+	0x1f, 0x28, 0x5c, 0xf3, 0xbf, 0x56, 0x82, 0x53, 0x6a, 0x04, 0x0c, 0x69, 0xf9, 0xcf, 0xfa, 0x18,
+	0x5c, 0x85, 0xb1, 0x26, 0xd9, 0x74, 0xba, 0xad, 0x48, 0xd9, 0x35, 0x06, 0xb9, 0xa9, 0x6d, 0x29,
+	0x2e, 0xc6, 0x3a, 0xce, 0x11, 0x86, 0xed, 0x6f, 0x4d, 0xb2, 0x83, 0x38, 0x72, 0xe8, 0x1a, 0x57,
+	0xbb, 0xc6, 0xca, 0xdd, 0x35, 0xcf, 0xc1, 0xa0, 0xdb, 0xa6, 0x82, 0x59, 0xc9, 0x94, 0xb7, 0x56,
+	0x69, 0x21, 0xe6, 0x30, 0xf4, 0x02, 0x0c, 0x37, 0xfc, 0x76, 0xdb, 0xf1, 0x9a, 0xec, 0xc8, 0x1b,
+	0x5d, 0x18, 0xa3, 0xb2, 0xdb, 0x22, 0x2f, 0xc2, 0x12, 0x46, 0x85, 0x6f, 0x27, 0xd8, 0xe2, 0xca,
+	0x1e, 0x21, 0x7c, 0xcf, 0x07, 0x5b, 0x21, 0x66, 0xa5, 0xf4, 0xae, 0xfa, 0xc0, 0x0f, 0x76, 0x5c,
+	0x6f, 0x6b, 0xc9, 0x0d, 0xc4, 0x96, 0x50, 0x67, 0xe1, 0x3d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x81,
+	0xc1, 0x8e, 0x1f, 0x44, 0x61, 0x65, 0x88, 0x0d, 0xf7, 0xb3, 0x39, 0x8c, 0x88, 0x7f, 0x6d, 0xcd,
+	0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xea, 0xe8, 0x16, 0x0c, 0x13, 0x6f, 0x77, 0x25,
+	0xf0, 0xdb, 0x95, 0x13, 0xf9, 0x94, 0x96, 0x39, 0x0a, 0x5f, 0x66, 0xb1, 0x8c, 0x2a, 0x8a, 0xb1,
+	0x24, 0x81, 0xbe, 0x08, 0x65, 0xe2, 0xed, 0x56, 0x86, 0x19, 0xa5, 0xd9, 0x1c, 0x4a, 0x77, 0x9d,
+	0x20, 0xe6, 0xf9, 0xcb, 0xde, 0x2e, 0xa6, 0x75, 0xd0, 0x57, 0x60, 0x54, 0x32, 0x8c, 0x50, 0x68,
+	0x51, 0x33, 0x17, 0xac, 0x64, 0x33, 0x98, 0x7c, 0xd4, 0x75, 0x03, 0xd2, 0x26, 0x5e, 0x14, 0xc6,
+	0x1c, 0x52, 0x42, 0x43, 0x1c, 0x53, 0x43, 0x0d, 0x18, 0x0f, 0x48, 0xe8, 0x3e, 0x22, 0x35, 0xbf,
+	0xe5, 0x36, 0xf6, 0x2a, 0x67, 0x58, 0xf7, 0x5e, 0x2a, 0x1c, 0x32, 0xac, 0x55, 0x88, 0xb5, 0xfc,
+	0x7a, 0x29, 0x36, 0x88, 0xa2, 0x0f, 0x60, 0x22, 0x20, 0x61, 0xe4, 0x04, 0x91, 0x68, 0xa5, 0xa2,
+	0xac, 0x72, 0x13, 0x58, 0x07, 0xf0, 0xeb, 0x44, 0xdc, 0x4c, 0x0c, 0xc1, 0x26, 0x05, 0x14, 0x01,
+	0x32, 0x0a, 0x70, 0xb7, 0x45, 0xc2, 0xca, 0xd9, 0x7c, 0x6b, 0x66, 0x92, 0x2c, 0xad, 0xb0, 0x30,
+	0x2b, 0x3a, 0x8f, 0x70, 0x8a, 0x16, 0xce, 0xa0, 0x8f, 0xbe, 0x22, 0x0d, 0x1d, 0x6b, 0x7e, 0xd7,
+	0x8b, 0xc2, 0xca, 0x28, 0x6b, 0x2f, 0xd3, 0x22, 0x7e, 0x37, 0xc6, 0x4b, 0x5a, 0x42, 0x78, 0x65,
+	0x6c, 0x90, 0x42, 0x5f, 0x87, 0x09, 0xfe, 0x9f, 0x1b, 0x72, 0xc3, 0xca, 0x29, 0x46, 0xfb, 0x62,
+	0x3e, 0x6d, 0x8e, 0xb8, 0x70, 0x4a, 0x10, 0x9f, 0xd0, 0x4b, 0x43, 0x6c, 0x52, 0x43, 0x18, 0x26,
+	0x5a, 0xee, 0x2e, 0xf1, 0x48, 0x18, 0xd6, 0x02, 0xff, 0x3e, 0x11, 0x7a, 0xe9, 0xb3, 0xd9, 0x86,
+	0x5f, 0xff, 0x3e, 0x11, 0x57, 0x4f, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x3b, 0x30, 0x19, 0x10, 0xa7,
+	0xe9, 0xc6, 0x44, 0xc7, 0x7a, 0x11, 0x65, 0xd7, 0x75, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x36,
+	0x8c, 0xb3, 0x81, 0xef, 0x76, 0x38, 0xd1, 0xd3, 0xbd, 0x88, 0x32, 0x37, 0x86, 0xba, 0x56, 0x05,
+	0x1b, 0x04, 0xd0, 0xfb, 0x30, 0xda, 0x72, 0x37, 0x49, 0x63, 0xaf, 0xd1, 0x22, 0x95, 0x71, 0x46,
+	0x2d, 0x93, 0x05, 0xdf, 0x92, 0x48, 0xfc, 0x56, 0xa0, 0xfe, 0xe2, 0xb8, 0x3a, 0xba, 0x0b, 0xa7,
+	0x23, 0x12, 0xb4, 0x5d, 0xcf, 0xa1, 0xac, 0x53, 0x5c, 0x44, 0x99, 0x3d, 0x7e, 0x82, 0xad, 0xe9,
+	0x0b, 0x62, 0x36, 0x4e, 0x6f, 0x64, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x21, 0x54, 0x32, 0x20, 0x7c,
+	0xb7, 0x9c, 0x64, 0x94, 0xdf, 0x11, 0x94, 0x2b, 0x1b, 0x39, 0x78, 0x87, 0x05, 0x30, 0x9c, 0x4b,
+	0x1d, 0xdd, 0x86, 0x29, 0xc6, 0xaf, 0x6b, 0xdd, 0x56, 0x4b, 0x34, 0x38, 0xc9, 0x1a, 0x7c, 0x41,
+	0x4a, 0x2f, 0xab, 0x26, 0xf8, 0x70, 0xbf, 0x0a, 0xf1, 0x3f, 0x9c, 0xac, 0x8d, 0xee, 0x33, 0xd3,
+	0x6f, 0x37, 0x70, 0xa3, 0x3d, 0xba, 0xe9, 0xc8, 0xc3, 0xa8, 0x32, 0x55, 0xa8, 0x06, 0xd3, 0x51,
+	0x95, 0x7d, 0x58, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x00, 0x0a, 0xa3, 0xa6, 0xeb, 0x55, 0xa6, 0xf9,
+	0x2d, 0x4e, 0xf2, 0xef, 0x3a, 0x2d, 0xc4, 0x1c, 0xc6, 0xcc, 0xbe, 0xf4, 0xc7, 0x6d, 0x7a, 0xce,
+	0xcf, 0x30, 0xc4, 0xd8, 0xec, 0x2b, 0x01, 0x38, 0xc6, 0xa1, 0xa2, 0x77, 0x14, 0xed, 0x55, 0x10,
+	0x43, 0x55, 0x6c, 0x78, 0x63, 0xe3, 0x2b, 0x98, 0x96, 0xdb, 0xbf, 0x6b, 0xc1, 0x45, 0xc5, 0x46,
+	0x96, 0x1f, 0x46, 0xc4, 0x6b, 0x92, 0xa6, 0xce, 0x73, 0x49, 0x18, 0xa1, 0xb7, 0x61, 0xa2, 0x21,
+	0x71, 0x34, 0x13, 0xb5, 0xda, 0xa5, 0x8b, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x1a, 0xe3, 0xc6, 0x8c,
+	0x9e, 0xa6, 0x6c, 0xd2, 0x59, 0xac, 0x82, 0x61, 0x03, 0x13, 0xbd, 0x09, 0x63, 0x01, 0xef, 0x01,
+	0xab, 0x58, 0x36, 0x3d, 0x25, 0x70, 0x0c, 0xc2, 0x3a, 0x9e, 0x7d, 0x1f, 0x26, 0x55, 0x87, 0xd8,
+	0x34, 0xa3, 0x2a, 0x0c, 0x32, 0xf9, 0x59, 0xe8, 0xa1, 0x47, 0xe9, 0xa8, 0x32, 0xd9, 0x1a, 0xf3,
+	0x72, 0x36, 0xaa, 0xee, 0x23, 0xb2, 0xb0, 0x17, 0x11, 0xae, 0xd4, 0x29, 0x6b, 0xa3, 0x2a, 0x01,
+	0x38, 0xc6, 0xb1, 0xff, 0x7f, 0x7e, 0x0f, 0x89, 0x8f, 0xdb, 0x3e, 0x04, 0x8c, 0x57, 0x60, 0x84,
+	0x79, 0xd0, 0xf8, 0x01, 0x37, 0x73, 0x0f, 0xc6, 0x37, 0x8f, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x63,
+	0xcc, 0x59, 0x15, 0x2e, 0x1d, 0xa5, 0xc7, 0x9c, 0xd5, 0x33, 0x71, 0xd1, 0x35, 0x18, 0x61, 0xce,
+	0x62, 0x0d, 0xbf, 0x25, 0xc4, 0x76, 0x29, 0xe2, 0x8d, 0xd4, 0x44, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b,
+	0x6c, 0x74, 0x09, 0x86, 0x68, 0x17, 0x56, 0x6b, 0x42, 0x2e, 0x51, 0x2a, 0xd5, 0x1b, 0xac, 0x14,
+	0x0b, 0xa8, 0xfd, 0x9b, 0x16, 0x13, 0x4a, 0xd3, 0x87, 0x27, 0xba, 0x91, 0x98, 0x6f, 0x3e, 0x20,
+	0xcf, 0x67, 0xcd, 0xf7, 0x61, 0xf1, 0xfc, 0x7f, 0x35, 0x79, 0xc4, 0xf2, 0xa5, 0xf3, 0x86, 0x1c,
+	0x82, 0xe4, 0x31, 0xfb, 0x4c, 0xbc, 0x6e, 0x69, 0x7f, 0x8a, 0xce, 0x5a, 0xfb, 0xb7, 0xf8, 0x35,
+	0x39, 0x75, 0x7c, 0xa2, 0x25, 0x18, 0x72, 0xd8, 0x0d, 0x43, 0x74, 0xfc, 0x15, 0x39, 0x00, 0xf3,
+	0xac, 0xf4, 0x50, 0xd8, 0xab, 0x93, 0xf5, 0x38, 0x14, 0x8b, 0xba, 0xe8, 0x9b, 0x30, 0x4a, 0x1e,
+	0xba, 0xd1, 0xa2, 0xdf, 0x14, 0x0b, 0xca, 0xd4, 0x95, 0x16, 0x9e, 0xe0, 0xb7, 0xbd, 0x65, 0x59,
+	0x95, 0x33, 0x6d, 0xf5, 0x17, 0xc7, 0x44, 0xed, 0x9f, 0xb3, 0xa0, 0xda, 0xa3, 0x36, 0xba, 0x47,
+	0x85, 0x65, 0x12, 0x38, 0x91, 0x2f, 0xed, 0x9e, 0x6f, 0xcb, 0x65, 0x70, 0x5b, 0x94, 0x1f, 0xee,
+	0x57, 0x5f, 0xec, 0x41, 0x46, 0xa2, 0x62, 0x45, 0x0c, 0xd9, 0x30, 0xc4, 0xd4, 0x25, 0x5c, 0xfa,
+	0x1f, 0xe4, 0xc6, 0xcf, 0xbb, 0xac, 0x04, 0x0b, 0x88, 0xfd, 0x57, 0x4a, 0xda, 0x3e, 0xac, 0x47,
+	0x4e, 0x44, 0x50, 0x0d, 0x86, 0x1f, 0x38, 0x6e, 0xe4, 0x7a, 0x5b, 0xe2, 0x8a, 0x52, 0x2c, 0x93,
+	0xb1, 0x4a, 0xf7, 0x78, 0x05, 0x2e, 0x68, 0x8b, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0xae, 0xe7,
+	0x51, 0x8a, 0xa5, 0x7e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0x3e, 0x04,
+	0x90, 0xc7, 0x0a, 0x69, 0x0a, 0x35, 0xf7, 0x2b, 0xbd, 0x89, 0x6e, 0xa8, 0x3a, 0x5c, 0x8f, 0x1e,
+	0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0x76, 0x8d, 0xde, 0x19, 0xf4, 0x35, 0xca, 0xd7, 0x9d, 0x20,
+	0x22, 0xcd, 0xf9, 0x48, 0x0c, 0xce, 0x67, 0xfb, 0xd3, 0x63, 0x6c, 0xb8, 0x6d, 0xa2, 0x9f, 0x01,
+	0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x5e, 0x86, 0x4a, 0x5e, 0x77, 0x29, 0x5b, 0x92, 0xab, 0x4a,
+	0xd8, 0x1f, 0x14, 0x5b, 0x92, 0x4b, 0x00, 0x2b, 0x0c, 0xca, 0x1f, 0x42, 0x77, 0x4b, 0xaa, 0xa1,
+	0x06, 0x63, 0xfe, 0x50, 0x67, 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x40, 0x9c, 0x50, 0xf8, 0x89, 0x6a,
+	0x7c, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0x42, 0x7c, 0xa0, 0x87, 0x42, 0xdc, 0x18, 0xa2, 0xc1,
+	0x27, 0x3b, 0x44, 0xe8, 0x1b, 0x00, 0x9b, 0xae, 0xe7, 0x86, 0xdb, 0x8c, 0xfa, 0xd0, 0x91, 0xa9,
+	0xab, 0xfb, 0xdb, 0x8a, 0xa2, 0x82, 0x35, 0x8a, 0xf4, 0x2c, 0x53, 0x2c, 0x7a, 0x75, 0x89, 0x79,
+	0xa9, 0x68, 0x67, 0x59, 0x7c, 0x5e, 0x2d, 0x61, 0x1d, 0xcf, 0xfe, 0x56, 0x72, 0xbd, 0x88, 0x1d,
+	0xa0, 0x8d, 0xaf, 0xd5, 0xef, 0xf8, 0x96, 0x8a, 0xc7, 0xd7, 0xfe, 0x17, 0xa3, 0x30, 0x65, 0x34,
+	0xd6, 0x0d, 0xfb, 0x38, 0xd5, 0xae, 0x53, 0xa9, 0xc5, 0x89, 0x88, 0xd8, 0x7f, 0x76, 0xef, 0xad,
+	0xa2, 0x4b, 0x36, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x1b, 0x30, 0xda, 0x72, 0x42, 0xa6, 0x5c, 0x27,
+	0x62, 0xdf, 0xf5, 0x43, 0x2c, 0xd6, 0x5d, 0x38, 0x61, 0xa4, 0x89, 0x8a, 0x9c, 0x76, 0x4c, 0x92,
+	0x8a, 0x57, 0x54, 0x28, 0x97, 0x8e, 0xc8, 0xaa, 0x13, 0x54, 0x72, 0xdf, 0xc3, 0x1c, 0x26, 0x84,
+	0x15, 0xba, 0x2a, 0x16, 0xe9, 0x15, 0x86, 0x2d, 0xb3, 0x41, 0x43, 0x58, 0x51, 0x30, 0x6c, 0x60,
+	0xc6, 0xea, 0x83, 0xa1, 0x02, 0xf5, 0xc1, 0x4b, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0x63,
+	0x95, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0x91, 0xfe, 0x16, 0x0c, 0x7a, 0x01, 0x86, 0xc5, 0xa2,
+	0x66, 0x1e, 0x42, 0x23, 0x9c, 0xcb, 0x89, 0x25, 0x8f, 0x25, 0x0c, 0xfd, 0xbc, 0x05, 0xc8, 0x69,
+	0xb5, 0xfc, 0x06, 0xe3, 0x50, 0xea, 0x1e, 0x0e, 0xec, 0x7e, 0xf6, 0x76, 0xcf, 0x61, 0xef, 0x86,
+	0x73, 0xf3, 0xa9, 0xda, 0x5c, 0xa9, 0xff, 0x96, 0xbc, 0x7e, 0xa6, 0x11, 0xf4, 0xe3, 0xfe, 0x96,
+	0x1b, 0x46, 0xdf, 0xfe, 0x57, 0x89, 0xe3, 0x3f, 0xa3, 0x4b, 0xe8, 0x8e, 0xae, 0x27, 0x18, 0x3b,
+	0xa2, 0x9e, 0x60, 0x22, 0x57, 0x47, 0xf0, 0xe7, 0x12, 0xb7, 0xde, 0x71, 0xf6, 0xe5, 0x2f, 0xf4,
+	0xb8, 0xf5, 0x0a, 0xcb, 0x4f, 0x3f, 0x77, 0xdf, 0x9a, 0x70, 0x59, 0x98, 0x60, 0x5d, 0x2e, 0xd6,
+	0xd7, 0xdc, 0x09, 0x49, 0xb0, 0x70, 0x56, 0x7a, 0x34, 0x1c, 0xea, 0xd2, 0x9d, 0xe6, 0xe2, 0xf0,
+	0xe3, 0x16, 0x54, 0xd2, 0x03, 0xc4, 0xbb, 0x54, 0x99, 0x64, 0xfd, 0xb7, 0x8b, 0x46, 0x46, 0x74,
+	0x5e, 0x7a, 0x66, 0x57, 0xe6, 0x73, 0x68, 0xe1, 0xdc, 0x56, 0xd0, 0x35, 0x80, 0x30, 0xf2, 0x3b,
+	0x9c, 0xd7, 0xb3, 0x1b, 0xd0, 0x28, 0xf3, 0x0d, 0x82, 0xba, 0x2a, 0x3d, 0x8c, 0xcf, 0x02, 0x0d,
+	0x77, 0xb6, 0x0b, 0x67, 0x72, 0x56, 0x4c, 0x86, 0x69, 0x66, 0x49, 0x37, 0xcd, 0xf4, 0x50, 0xe8,
+	0xcf, 0xc9, 0x39, 0x9d, 0xfb, 0xa0, 0xeb, 0x78, 0x91, 0x1b, 0xed, 0xe9, 0xa6, 0x1c, 0x0f, 0xcc,
+	0xa1, 0x44, 0x5f, 0x87, 0xc1, 0x96, 0xeb, 0x75, 0x1f, 0x8a, 0x33, 0xf6, 0x52, 0xf6, 0x9d, 0xd9,
+	0xeb, 0x3e, 0x34, 0x27, 0xa7, 0x4a, 0xb7, 0x32, 0x2b, 0x3f, 0xdc, 0xaf, 0xa2, 0x34, 0x02, 0xe6,
+	0x54, 0xed, 0xcf, 0xc2, 0xe4, 0x92, 0x43, 0xda, 0xbe, 0xb7, 0xec, 0x35, 0x3b, 0xbe, 0xeb, 0x45,
+	0xa8, 0x02, 0x03, 0x4c, 0x7c, 0xe7, 0x47, 0xeb, 0x00, 0x1d, 0x7c, 0xcc, 0x4a, 0xec, 0x2d, 0x38,
+	0xb5, 0xe4, 0x3f, 0xf0, 0x1e, 0x38, 0x41, 0x73, 0xbe, 0xb6, 0xaa, 0xa9, 0xb6, 0xd7, 0xa5, 0x6a,
+	0xd5, 0xca, 0x57, 0x5c, 0x69, 0x35, 0xf9, 0x22, 0x5c, 0x71, 0x5b, 0x24, 0xc7, 0x00, 0xf1, 0xd7,
+	0x4b, 0x46, 0x4b, 0x31, 0xbe, 0x32, 0x9f, 0x5b, 0xb9, 0x9e, 0x37, 0x1f, 0xc0, 0xc8, 0xa6, 0x4b,
+	0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x6c, 0xbc, 0x98, 0xef, 0x9b, 0xbb, 0x42, 0x31, 0x95, 0x9d, 0x9f,
+	0x29, 0x66, 0x57, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x96, 0x73, 0x26, 0xa1, 0x82, 0xdf,
+	0xbf, 0x54, 0xb4, 0x7c, 0x4d, 0xe2, 0xec, 0x9d, 0x02, 0x4e, 0x90, 0xc1, 0x29, 0xc2, 0xe8, 0x1c,
+	0x0c, 0xb4, 0xa9, 0x64, 0x33, 0xc0, 0x86, 0x9f, 0x69, 0x62, 0x99, 0x52, 0x99, 0x95, 0xda, 0x7f,
+	0xc3, 0x82, 0x33, 0xa9, 0x91, 0x11, 0xca, 0xf5, 0x27, 0x3c, 0x0b, 0x49, 0x65, 0x77, 0xa9, 0xb7,
+	0xb2, 0xdb, 0xfe, 0x2f, 0x2c, 0x38, 0xb9, 0xdc, 0xee, 0x44, 0x7b, 0x4b, 0xae, 0xe9, 0x26, 0xf3,
+	0x05, 0x18, 0x6a, 0x93, 0xa6, 0xdb, 0x6d, 0x8b, 0x99, 0xab, 0xca, 0xd3, 0x7f, 0x8d, 0x95, 0x52,
+	0x0e, 0x52, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, 0x26, 0x43, 0xb9, 0x8f, 0xc8,
+	0x2d, 0xb7, 0xed, 0x46, 0x8f, 0xb7, 0xbb, 0x84, 0x87, 0x8b, 0x24, 0x82, 0x63, 0x7a, 0xf6, 0xf7,
+	0x2d, 0x98, 0x92, 0xeb, 0x7e, 0xbe, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0x28, 0xb9, 0x1d, 0xd1,
+	0x4b, 0x10, 0xbd, 0x2c, 0xad, 0xd6, 0x70, 0xc9, 0xed, 0xc8, 0x0b, 0xb1, 0x17, 0x5f, 0xee, 0x8d,
+	0x0b, 0xb1, 0xc7, 0xde, 0x4c, 0x48, 0x0c, 0x74, 0x19, 0x46, 0x3c, 0xbf, 0xc9, 0xef, 0x94, 0xc2,
+	0xdd, 0x83, 0x62, 0xae, 0x8b, 0x32, 0xac, 0xa0, 0xa8, 0x06, 0xa3, 0xdc, 0x15, 0x3c, 0x5e, 0xb4,
+	0x7d, 0x39, 0x94, 0xb3, 0x2f, 0xdb, 0x90, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0xa7, 0x16, 0x8c, 0xcb,
+	0x2f, 0xeb, 0xf3, 0xb6, 0x4f, 0xb7, 0x56, 0x7c, 0xd3, 0x8f, 0xb7, 0x16, 0xbd, 0xad, 0x33, 0x88,
+	0x71, 0x49, 0x2f, 0x1f, 0xe9, 0x92, 0x7e, 0x15, 0xc6, 0x9c, 0x4e, 0xa7, 0x66, 0xde, 0xf0, 0xd9,
+	0x52, 0x9a, 0x8f, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x6c, 0x09, 0x26, 0xe5, 0x17, 0xd4, 0xbb, 0xf7,
+	0x43, 0x12, 0xa1, 0x0d, 0x18, 0x75, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x9f, 0xcb, 0x56, 0xe1, 0x1b,
+	0x53, 0x1a, 0x0b, 0xd2, 0xf3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x16, 0xcc, 0x78, 0x7e, 0xc4, 0x84,
+	0x2a, 0x05, 0x2f, 0xf2, 0xaa, 0x48, 0x52, 0x3f, 0x2b, 0xa8, 0xcf, 0xac, 0x27, 0xa9, 0xe0, 0x34,
+	0x61, 0xb4, 0x2c, 0xcd, 0x22, 0xe5, 0x7c, 0xcd, 0xb2, 0x3e, 0x71, 0xd9, 0x56, 0x11, 0xfb, 0x9f,
+	0x58, 0x30, 0x2a, 0xd1, 0x8e, 0xc3, 0x81, 0x66, 0x0d, 0x86, 0x43, 0x36, 0x09, 0x72, 0x68, 0xec,
+	0xa2, 0x8e, 0xf3, 0xf9, 0x8a, 0x65, 0x45, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0xb3, 0x8a, 0xab, 0xee,
+	0x7f, 0x42, 0xac, 0xe2, 0xaa, 0x3f, 0x39, 0x87, 0xd2, 0xbf, 0x61, 0x7d, 0xd6, 0xcc, 0x4c, 0xf4,
+	0x4a, 0xd3, 0x09, 0xc8, 0xa6, 0xfb, 0x30, 0x79, 0xa5, 0xa9, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x43,
+	0x18, 0x6f, 0x48, 0x73, 0x68, 0xbc, 0xc3, 0x2f, 0x15, 0x9a, 0xe6, 0x95, 0x17, 0x07, 0x57, 0xac,
+	0x2f, 0x6a, 0xf5, 0xb1, 0x41, 0xcd, 0x74, 0x75, 0x2c, 0xf7, 0x72, 0x75, 0x8c, 0xe9, 0xe6, 0x3b,
+	0xfe, 0xfd, 0x9c, 0x05, 0x43, 0xdc, 0x0c, 0xd6, 0x9f, 0x15, 0x52, 0x73, 0x6a, 0x89, 0xc7, 0x8e,
+	0x29, 0x57, 0x84, 0x64, 0x83, 0xd6, 0x60, 0x94, 0xfd, 0x60, 0x66, 0xbc, 0x72, 0xfe, 0xc3, 0x48,
+	0xde, 0xaa, 0xde, 0xc1, 0xbb, 0xb2, 0x1a, 0x8e, 0x29, 0xd8, 0x7f, 0x54, 0xa6, 0xdc, 0x2d, 0x46,
+	0x35, 0x0e, 0x7d, 0xeb, 0xe9, 0x1d, 0xfa, 0xa5, 0xa7, 0x75, 0xe8, 0x6f, 0xc1, 0x54, 0x43, 0x73,
+	0x81, 0x89, 0x67, 0xf2, 0x72, 0xe1, 0x22, 0xd1, 0xbc, 0x65, 0xb8, 0xca, 0x7e, 0xd1, 0x24, 0x82,
+	0x93, 0x54, 0xd1, 0xd7, 0x60, 0x9c, 0xcf, 0xb3, 0x68, 0x85, 0x7b, 0x8b, 0xbe, 0x90, 0xbf, 0x5e,
+	0xf4, 0x26, 0xb8, 0x89, 0x47, 0xab, 0x8e, 0x0d, 0x62, 0xa8, 0x0e, 0xb0, 0xe9, 0xb6, 0x88, 0x20,
+	0x5d, 0xe0, 0xd8, 0xbd, 0xc2, 0xb1, 0x14, 0xe1, 0x49, 0xae, 0x87, 0x90, 0x55, 0xb1, 0x46, 0xc6,
+	0xfe, 0x77, 0x16, 0xa0, 0xe5, 0xce, 0x36, 0x69, 0x93, 0xc0, 0x69, 0xc5, 0xe6, 0xf1, 0x9f, 0xb4,
+	0xa0, 0x42, 0x52, 0xc5, 0x8b, 0x7e, 0xbb, 0x2d, 0x34, 0x0c, 0x39, 0x4a, 0xb0, 0xe5, 0x9c, 0x3a,
+	0xf1, 0x2d, 0x23, 0x0f, 0x03, 0xe7, 0xb6, 0x87, 0xd6, 0xe0, 0x04, 0x3f, 0x7a, 0x0d, 0xbb, 0x82,
+	0xd8, 0x11, 0xcf, 0x08, 0xc2, 0x27, 0x36, 0xd2, 0x28, 0x38, 0xab, 0x9e, 0xfd, 0x0f, 0x26, 0x21,
+	0xb7, 0x17, 0x9f, 0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0xc0, 0xa7, 0x7e, 0x01, 0x9f,
+	0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0x80, 0xe6, 0x17, 0xf0, 0x57, 0x2d, 0x38, 0xa5,
+	0x0e, 0x4d, 0x43, 0xf7, 0xf0, 0xa3, 0x70, 0x82, 0x6f, 0xb7, 0xc5, 0x96, 0xe3, 0xb6, 0x37, 0x48,
+	0xbb, 0xd3, 0x72, 0x22, 0xe9, 0x73, 0x78, 0x35, 0x73, 0xe5, 0x26, 0x1e, 0x36, 0x19, 0x15, 0xf9,
+	0x0b, 0xd1, 0x0c, 0x00, 0xce, 0x6a, 0xc6, 0xfe, 0xf5, 0x11, 0x18, 0x5c, 0xde, 0x25, 0x5e, 0x74,
+	0x0c, 0xb7, 0xb4, 0x06, 0x4c, 0xba, 0xde, 0xae, 0xdf, 0xda, 0x25, 0x4d, 0x0e, 0x3f, 0x8a, 0x32,
+	0xe1, 0xb4, 0x20, 0x3d, 0xb9, 0x6a, 0x90, 0xc0, 0x09, 0x92, 0x4f, 0xc3, 0x50, 0x76, 0x1d, 0x86,
+	0xf8, 0x91, 0x27, 0x84, 0xc6, 0x4c, 0x9e, 0xcd, 0x06, 0x51, 0x1c, 0xe4, 0xb1, 0x11, 0x8f, 0x1f,
+	0xa9, 0xa2, 0x3a, 0xfa, 0x16, 0x4c, 0x6e, 0xba, 0x41, 0x18, 0x6d, 0xb8, 0x6d, 0x7a, 0x3e, 0xb4,
+	0x3b, 0x8f, 0x61, 0x18, 0x53, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x0b, 0x26, 0x5a,
+	0x8e, 0xde, 0xd4, 0xf0, 0x91, 0x9b, 0x52, 0xa7, 0xc3, 0x2d, 0x9d, 0x10, 0x36, 0xe9, 0xd2, 0xed,
+	0xd4, 0x60, 0xb6, 0x9d, 0x11, 0xa6, 0x99, 0x51, 0xdb, 0x89, 0x1b, 0x75, 0x38, 0x8c, 0x8a, 0x85,
+	0xec, 0x79, 0xd0, 0xa8, 0x29, 0x16, 0x6a, 0x8f, 0x80, 0xbe, 0x09, 0xa3, 0x84, 0x0e, 0x21, 0x25,
+	0x2c, 0x0e, 0x98, 0x2b, 0xfd, 0xf5, 0x75, 0xcd, 0x6d, 0x04, 0xbe, 0x69, 0x92, 0x5c, 0x96, 0x94,
+	0x70, 0x4c, 0x14, 0x2d, 0xc2, 0x50, 0x48, 0x02, 0x57, 0x99, 0x3d, 0x0a, 0xa6, 0x91, 0xa1, 0x71,
+	0x2b, 0x3c, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x84, 0x3b, 0xc3, 0xb8, 0xb9, 0xbc, 0x12, 0x0e,
+	0x0b, 0xef, 0xc3, 0x70, 0x40, 0x5a, 0xcc, 0xe6, 0x3d, 0xd1, 0xff, 0x22, 0xe7, 0x26, 0x74, 0x5e,
+	0x0f, 0x4b, 0x02, 0xe8, 0x26, 0x95, 0x57, 0xa8, 0x58, 0xe9, 0x7a, 0x5b, 0xea, 0xd1, 0x8c, 0x60,
+	0xb4, 0x4a, 0x7c, 0xc7, 0x31, 0x86, 0x7c, 0x7d, 0x8e, 0x33, 0xaa, 0xa1, 0xeb, 0x30, 0xa3, 0x4a,
+	0x57, 0xbd, 0x30, 0x72, 0x28, 0x83, 0xe3, 0x96, 0x07, 0xa5, 0x2a, 0xc2, 0x49, 0x04, 0x9c, 0xae,
+	0x63, 0xff, 0xa2, 0x05, 0x7c, 0x9c, 0x8f, 0x41, 0x41, 0xf2, 0xae, 0xa9, 0x20, 0x39, 0x9b, 0x3b,
+	0x73, 0x39, 0xca, 0x91, 0x5f, 0xb4, 0x60, 0x4c, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x05, 0x6b, 0xb6,
+	0x0b, 0xd3, 0x74, 0xa5, 0xdf, 0xbe, 0x1f, 0x92, 0x60, 0x97, 0x34, 0xd9, 0xc2, 0x2c, 0x3d, 0xde,
+	0xc2, 0x54, 0x0e, 0xfa, 0xb7, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0x9b, 0xb2, 0xab, 0xea, 0x3d,
+	0x43, 0x43, 0xcd, 0x79, 0xe2, 0x3d, 0x83, 0x9a, 0x55, 0x1c, 0xe3, 0xd0, 0xad, 0xb6, 0xed, 0x87,
+	0x51, 0xf2, 0x3d, 0xc3, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xaf, 0x03, 0x2c, 0x3f, 0x24, 0x0d,
+	0xbe, 0x62, 0xf5, 0xab, 0x96, 0x95, 0x7f, 0xd5, 0xb2, 0x7f, 0xcf, 0x82, 0xc9, 0x95, 0x45, 0xe3,
+	0xe4, 0x9a, 0x03, 0xe0, 0xf7, 0xc3, 0x7b, 0xf7, 0xd6, 0xa5, 0x2f, 0x18, 0x77, 0xd6, 0x50, 0xa5,
+	0x58, 0xc3, 0x40, 0x67, 0xa1, 0xdc, 0xea, 0x7a, 0x42, 0x83, 0x3b, 0x4c, 0x8f, 0xc7, 0x5b, 0x5d,
+	0x0f, 0xd3, 0x32, 0xed, 0xe5, 0x69, 0xb9, 0xef, 0x97, 0xa7, 0x3d, 0x03, 0x60, 0xa1, 0x2a, 0x0c,
+	0x3e, 0x78, 0xe0, 0x36, 0x79, 0x5c, 0x0f, 0xe1, 0xa7, 0x76, 0xef, 0xde, 0xea, 0x52, 0x88, 0x79,
+	0xb9, 0xfd, 0xcb, 0x16, 0x4c, 0x25, 0x6e, 0xfb, 0xf4, 0xd6, 0xb8, 0xab, 0xa2, 0x2a, 0x25, 0x83,
+	0xc7, 0x68, 0xf1, 0x96, 0x34, 0xac, 0x3e, 0x5e, 0x5c, 0x8b, 0x17, 0x3b, 0xe5, 0x3e, 0x5e, 0xec,
+	0x14, 0xbb, 0xe1, 0x7f, 0xaf, 0x0c, 0xb3, 0x2b, 0x2d, 0xf2, 0xf0, 0x63, 0x86, 0x63, 0xe9, 0xf7,
+	0xa9, 0xef, 0xd1, 0xd4, 0x77, 0x47, 0x7d, 0xce, 0xdd, 0x7b, 0x0a, 0x37, 0x61, 0x98, 0x7f, 0xba,
+	0x0c, 0xce, 0x92, 0x69, 0x4c, 0xcf, 0x1f, 0x90, 0x39, 0x3e, 0x84, 0xc2, 0x98, 0xae, 0xce, 0x78,
+	0x51, 0x8a, 0x25, 0xf1, 0xd9, 0xb7, 0x60, 0x5c, 0xc7, 0x3c, 0x52, 0x60, 0x85, 0xbf, 0x50, 0x86,
+	0x69, 0xda, 0x83, 0xa7, 0x3a, 0x11, 0x77, 0xd2, 0x13, 0xf1, 0xa4, 0x1f, 0xd7, 0xf7, 0x9e, 0x8d,
+	0x0f, 0x93, 0xb3, 0x71, 0x35, 0x6f, 0x36, 0x8e, 0x7b, 0x0e, 0xfe, 0xa2, 0x05, 0x27, 0x56, 0x5a,
+	0x7e, 0x63, 0x27, 0xf1, 0x00, 0xfe, 0x4d, 0x18, 0xa3, 0x27, 0x48, 0x68, 0xc4, 0x82, 0x32, 0xa2,
+	0x83, 0x09, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0xac, 0x2e, 0x65, 0x05, 0x15, 0x13, 0x20,
+	0xac, 0xe3, 0xd9, 0xff, 0xdc, 0x82, 0xf3, 0xd7, 0x17, 0x97, 0xe3, 0xa5, 0x98, 0x8a, 0x6b, 0x76,
+	0x09, 0x86, 0x3a, 0x4d, 0xad, 0x2b, 0xb1, 0x52, 0x7e, 0x89, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x21,
+	0x04, 0xef, 0x00, 0x5c, 0xc7, 0xb5, 0x45, 0x71, 0x54, 0x48, 0x1b, 0x9c, 0x95, 0x6b, 0x83, 0x7b,
+	0x01, 0x86, 0xe9, 0x51, 0xe6, 0x36, 0x64, 0xbf, 0xb9, 0xbb, 0x0c, 0x2f, 0xc2, 0x12, 0x66, 0xff,
+	0x82, 0x05, 0x27, 0xae, 0xbb, 0x11, 0x95, 0x33, 0x92, 0x81, 0xbb, 0xa8, 0xa0, 0x11, 0xba, 0x91,
+	0x1f, 0xec, 0x25, 0x79, 0x2f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x0f, 0xda, 0x75, 0xd9, 0x93, 0xba,
+	0x92, 0x69, 0xf5, 0xc4, 0xa2, 0x1c, 0x2b, 0x0c, 0x3a, 0x5e, 0x4d, 0x37, 0x60, 0x9c, 0x5e, 0x72,
+	0x63, 0x35, 0x5e, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x8f, 0x2d, 0xa8, 0x5e, 0xe7, 0x81, 0x01,
+	0x36, 0xc3, 0x1c, 0xa6, 0xfb, 0x3a, 0x8c, 0x12, 0x69, 0x9e, 0x49, 0xfa, 0x72, 0x2b, 0xbb, 0x0d,
+	0x8f, 0x1f, 0xa6, 0xf0, 0xfa, 0x38, 0x33, 0x8e, 0x16, 0x66, 0x61, 0x05, 0x10, 0xd1, 0xdb, 0xd2,
+	0x03, 0xaa, 0xb1, 0xc8, 0x4c, 0xcb, 0x29, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x0d, 0x0b, 0x4e, 0xa9,
+	0x0f, 0xfe, 0xc4, 0x7d, 0xa6, 0xfd, 0xab, 0x25, 0x98, 0xb8, 0xb1, 0xb1, 0x51, 0xbb, 0x4e, 0x22,
+	0x6d, 0x55, 0x16, 0x3b, 0x5d, 0x60, 0xcd, 0x76, 0x5c, 0x74, 0xad, 0xed, 0x46, 0x6e, 0x6b, 0x8e,
+	0x87, 0x09, 0x9d, 0x5b, 0xf5, 0xa2, 0xdb, 0x41, 0x3d, 0x0a, 0x5c, 0x6f, 0x2b, 0x73, 0xa5, 0x4b,
+	0x31, 0xab, 0x9c, 0x27, 0x66, 0xa1, 0xd7, 0x61, 0x88, 0xc5, 0x29, 0x95, 0x93, 0xf0, 0x8c, 0xba,
+	0x15, 0xb2, 0xd2, 0xc3, 0xfd, 0xea, 0xe8, 0x1d, 0xbc, 0xca, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0,
+	0xd8, 0x76, 0x14, 0x75, 0x6e, 0x10, 0xa7, 0x49, 0x02, 0xc9, 0x65, 0x2f, 0x64, 0x71, 0x59, 0x3a,
+	0x08, 0x1c, 0x2d, 0x66, 0x4c, 0x71, 0x59, 0x88, 0x75, 0x3a, 0x76, 0x1d, 0x20, 0x86, 0x3d, 0x21,
+	0xb3, 0x99, 0xbd, 0x01, 0xa3, 0xf4, 0x73, 0xe7, 0x5b, 0xae, 0x53, 0xec, 0x98, 0xf0, 0x32, 0x8c,
+	0x4a, 0xb7, 0x83, 0x50, 0x44, 0x11, 0x62, 0x27, 0x92, 0xf4, 0x4a, 0x08, 0x71, 0x0c, 0xb7, 0x9f,
+	0x07, 0xe1, 0x1b, 0x5f, 0x44, 0xd2, 0xde, 0x84, 0x93, 0xcc, 0xc9, 0xdf, 0x89, 0xb6, 0x8d, 0x35,
+	0xda, 0x7b, 0x31, 0xbc, 0x22, 0xae, 0xa2, 0x25, 0xe5, 0x6d, 0x25, 0xa3, 0x54, 0x8c, 0x4b, 0x8a,
+	0xf1, 0xb5, 0xd4, 0xfe, 0xa3, 0x01, 0x78, 0x66, 0xb5, 0x9e, 0x1f, 0xfe, 0xee, 0x1a, 0x8c, 0x73,
+	0x09, 0x97, 0x2e, 0x0d, 0xa7, 0x25, 0xda, 0x55, 0x4a, 0xdb, 0x0d, 0x0d, 0x86, 0x0d, 0x4c, 0x2a,
+	0x11, 0xba, 0x1f, 0x79, 0xc9, 0x37, 0xdc, 0xab, 0x1f, 0xac, 0x63, 0x5a, 0x4e, 0xc1, 0x54, 0x58,
+	0xe6, 0x2c, 0x5d, 0x81, 0x95, 0xc0, 0xfc, 0x2e, 0x4c, 0xba, 0x61, 0x23, 0x74, 0x57, 0x3d, 0xba,
+	0x4f, 0xb5, 0x9d, 0xae, 0xd4, 0x24, 0xb4, 0xd3, 0x0a, 0x8a, 0x13, 0xd8, 0xda, 0xf9, 0x32, 0xd8,
+	0xb7, 0xc0, 0xdd, 0x33, 0xf8, 0x0e, 0x65, 0xff, 0x1d, 0xf6, 0x75, 0x21, 0xb3, 0x55, 0x08, 0xf6,
+	0xcf, 0x3f, 0x38, 0xc4, 0x12, 0x46, 0xef, 0xa0, 0x8d, 0x6d, 0xa7, 0x33, 0xdf, 0x8d, 0xb6, 0x97,
+	0xdc, 0xb0, 0xe1, 0xef, 0x92, 0x60, 0x8f, 0xa9, 0x0f, 0x46, 0xe2, 0x3b, 0xa8, 0x02, 0x2c, 0xde,
+	0x98, 0xaf, 0x51, 0x4c, 0x9c, 0xae, 0x83, 0xe6, 0x61, 0x4a, 0x16, 0xd6, 0x49, 0xc8, 0x8e, 0x80,
+	0x31, 0x46, 0x46, 0xbd, 0xaa, 0x16, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x14, 0x70, 0xe1, 0x49, 0x08,
+	0xb8, 0x5f, 0x80, 0x09, 0xd7, 0x73, 0x23, 0xd7, 0x89, 0x7c, 0x6e, 0x68, 0xe3, 0x9a, 0x02, 0xa6,
+	0x13, 0x5f, 0xd5, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0x8f, 0x01, 0x98, 0x61, 0xd3, 0xf6, 0xe9, 0x0a,
+	0xfb, 0x61, 0x5a, 0x61, 0x77, 0xd2, 0x2b, 0xec, 0x49, 0x48, 0xee, 0x8f, 0xbd, 0xcc, 0xbe, 0x63,
+	0xc1, 0x0c, 0x53, 0xcb, 0x1b, 0xcb, 0xec, 0x0a, 0x8c, 0x06, 0xc6, 0x83, 0xf7, 0x51, 0xdd, 0xfa,
+	0x27, 0xdf, 0xae, 0xc7, 0x38, 0xe8, 0x3d, 0x80, 0x4e, 0xac, 0xf6, 0x2f, 0x19, 0x51, 0x8a, 0x21,
+	0x57, 0xe3, 0xaf, 0xd5, 0xb1, 0xbf, 0x05, 0xa3, 0xea, 0x45, 0xbb, 0xbc, 0x20, 0x5b, 0x39, 0x17,
+	0xe4, 0xde, 0x62, 0x84, 0xf4, 0x4c, 0x2c, 0x67, 0x7a, 0x26, 0xfe, 0x6b, 0x0b, 0x62, 0xa3, 0x0c,
+	0xfa, 0x00, 0x46, 0x3b, 0x3e, 0x73, 0x64, 0x0f, 0xe4, 0xeb, 0x90, 0xe7, 0x0b, 0xad, 0x3a, 0x3c,
+	0x14, 0x69, 0xc0, 0xa7, 0xa3, 0x26, 0xab, 0xe2, 0x98, 0x0a, 0xba, 0x09, 0xc3, 0x9d, 0x80, 0xd4,
+	0x23, 0x16, 0x27, 0xaf, 0x7f, 0x82, 0x7c, 0xf9, 0xf2, 0x8a, 0x58, 0x52, 0x48, 0xf8, 0x05, 0x97,
+	0xfb, 0xf7, 0x0b, 0xb6, 0xff, 0x7e, 0x09, 0xa6, 0x93, 0x8d, 0xa0, 0x77, 0x60, 0x80, 0x3c, 0x24,
+	0x0d, 0xf1, 0xa5, 0x99, 0xd2, 0x44, 0xac, 0x10, 0xe2, 0x43, 0x47, 0xff, 0x63, 0x56, 0x0b, 0xdd,
+	0x80, 0x61, 0x2a, 0x4a, 0x5c, 0x57, 0xd1, 0x64, 0x9f, 0xcd, 0x13, 0x47, 0x94, 0x4c, 0xc6, 0x3f,
+	0x4b, 0x14, 0x61, 0x59, 0x9d, 0x39, 0x12, 0x36, 0x3a, 0x75, 0x7a, 0x4b, 0x8b, 0x8a, 0x94, 0x09,
+	0x1b, 0x8b, 0x35, 0x8e, 0x24, 0xa8, 0x71, 0x47, 0x42, 0x59, 0x88, 0x63, 0x22, 0xe8, 0x3d, 0x18,
+	0x0c, 0x5b, 0x84, 0x74, 0x84, 0xa7, 0x48, 0xa6, 0x4a, 0xb7, 0x4e, 0x11, 0x04, 0x25, 0xa6, 0x02,
+	0x62, 0x05, 0x98, 0x57, 0xb4, 0x7f, 0xcd, 0x02, 0xe0, 0x9e, 0x97, 0x8e, 0xb7, 0x45, 0x8e, 0xc1,
+	0x0a, 0xb2, 0x04, 0x03, 0x61, 0x87, 0x34, 0x8a, 0xde, 0x77, 0xc4, 0xfd, 0xa9, 0x77, 0x48, 0x23,
+	0x5e, 0xed, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0x9f, 0x00, 0x98, 0x8c, 0xd1, 0x56, 0x23, 0xd2, 0x46,
+	0xaf, 0x1a, 0x21, 0xb8, 0xce, 0x26, 0x42, 0x70, 0x8d, 0x32, 0x6c, 0x4d, 0xe1, 0xfe, 0x2d, 0x28,
+	0xb7, 0x9d, 0x87, 0x42, 0xa3, 0xfa, 0x72, 0x71, 0x37, 0x28, 0xfd, 0xb9, 0x35, 0xe7, 0x21, 0xbf,
+	0xc1, 0xbf, 0x2c, 0x77, 0xe7, 0x9a, 0xf3, 0xb0, 0xe7, 0x1b, 0x04, 0xda, 0x08, 0x6b, 0xcb, 0xf5,
+	0x84, 0x53, 0x61, 0x5f, 0x6d, 0xb9, 0x5e, 0xb2, 0x2d, 0xd7, 0xeb, 0xa3, 0x2d, 0xd7, 0x43, 0x8f,
+	0x60, 0x58, 0xf8, 0xfc, 0x8a, 0xd8, 0xa0, 0x57, 0xfa, 0x68, 0x4f, 0xb8, 0x0c, 0xf3, 0x36, 0xaf,
+	0x48, 0x0d, 0x85, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x35, 0x0b, 0x26, 0xc5, 0x6f, 0xf1,
+	0x9c, 0x56, 0x48, 0xf0, 0x9f, 0xef, 0xbf, 0x0f, 0xa2, 0x22, 0xef, 0xca, 0xe7, 0xe5, 0x61, 0x6b,
+	0x02, 0x7b, 0xf6, 0x28, 0xd1, 0x0b, 0xf4, 0xf7, 0x2d, 0x38, 0xd9, 0x76, 0x1e, 0xf2, 0x16, 0x79,
+	0x19, 0x76, 0x22, 0xd7, 0x17, 0x6e, 0x2e, 0xef, 0xf4, 0x37, 0xfd, 0xa9, 0xea, 0xbc, 0x93, 0xd2,
+	0xba, 0x7c, 0x32, 0x0b, 0xa5, 0x67, 0x57, 0x33, 0xfb, 0x35, 0xbb, 0x09, 0x23, 0x72, 0xbd, 0x3d,
+	0xcd, 0x07, 0x0d, 0xac, 0x1d, 0xb1, 0xd6, 0x9e, 0x6a, 0x3b, 0xdf, 0x82, 0x71, 0x7d, 0x8d, 0x3d,
+	0xd5, 0xb6, 0x3e, 0x82, 0x13, 0x19, 0x6b, 0xe9, 0xa9, 0x36, 0xf9, 0x00, 0xce, 0xe6, 0xae, 0x8f,
+	0xa7, 0xfa, 0x20, 0xe5, 0x57, 0x2d, 0x9d, 0x0f, 0x1e, 0x83, 0x29, 0x6a, 0xd1, 0x34, 0x45, 0x5d,
+	0x28, 0xde, 0x39, 0x39, 0xf6, 0xa8, 0x0f, 0xf5, 0x4e, 0x53, 0xae, 0x8e, 0xde, 0x87, 0xa1, 0x16,
+	0x2d, 0x91, 0x9e, 0xe3, 0x76, 0xef, 0x1d, 0x19, 0x4b, 0xd4, 0xac, 0x3c, 0xc4, 0x82, 0x82, 0xfd,
+	0x33, 0x16, 0x64, 0x3c, 0xa9, 0xa1, 0x12, 0x56, 0xd7, 0x6d, 0xb2, 0x21, 0x29, 0xc7, 0x12, 0x96,
+	0x8a, 0x50, 0x75, 0x1e, 0xca, 0x5b, 0x6e, 0x53, 0xbc, 0xd6, 0x57, 0xe0, 0xeb, 0x14, 0xbc, 0xe5,
+	0x36, 0xd1, 0x0a, 0xa0, 0xb0, 0xdb, 0xe9, 0xb4, 0x98, 0x67, 0x98, 0xd3, 0xba, 0x1e, 0xf8, 0xdd,
+	0x0e, 0x77, 0x13, 0x2f, 0x73, 0xf5, 0x52, 0x3d, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x7f, 0x64, 0xc1,
+	0xc0, 0x31, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xd5, 0x5c, 0xd2, 0x22, 0xa5, 0xcc, 0x1c, 0x76, 0x1e,
+	0xb0, 0x70, 0x0d, 0x21, 0x13, 0x38, 0x32, 0x67, 0x6d, 0xdf, 0x82, 0x13, 0xb7, 0x7c, 0xa7, 0xb9,
+	0xe0, 0xb4, 0x1c, 0xaf, 0x41, 0x82, 0x55, 0x6f, 0xeb, 0x48, 0x6f, 0x32, 0x4a, 0x3d, 0xdf, 0x64,
+	0x5c, 0x83, 0x21, 0xb7, 0xa3, 0xe5, 0xa4, 0xb8, 0x48, 0x67, 0x77, 0xb5, 0x26, 0xd2, 0x51, 0x20,
+	0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x6f, 0x71, 0x20, 0x7f, 0x59, 0xd2, 0x5b,
+	0x52, 0x32, 0xd6, 0xa2, 0xe1, 0xb6, 0xbf, 0x0d, 0x46, 0x13, 0xe2, 0x91, 0x1a, 0x86, 0x61, 0x97,
+	0x7f, 0xa9, 0x58, 0x9b, 0x2f, 0x66, 0xdf, 0x5e, 0x52, 0x03, 0xa3, 0xbd, 0xc6, 0xe4, 0x05, 0x58,
+	0x12, 0xb2, 0xaf, 0x41, 0x66, 0x6c, 0xac, 0xde, 0x9a, 0x29, 0xfb, 0x2b, 0x30, 0xc3, 0x6a, 0x1e,
+	0x51, 0xeb, 0x63, 0x27, 0xf4, 0xe9, 0x19, 0xe1, 0xc5, 0xed, 0xff, 0xc5, 0x02, 0xb4, 0xe6, 0x37,
+	0xdd, 0xcd, 0x3d, 0x41, 0x9c, 0x7f, 0xff, 0x47, 0x50, 0xe5, 0xd7, 0xea, 0x64, 0x08, 0xee, 0xc5,
+	0x96, 0x13, 0x86, 0x9a, 0x2e, 0xff, 0x45, 0xd1, 0x6e, 0x75, 0xa3, 0x18, 0x1d, 0xf7, 0xa2, 0x87,
+	0x3e, 0x48, 0x44, 0x44, 0xfd, 0x62, 0x2a, 0x22, 0xea, 0x8b, 0x99, 0x4e, 0x40, 0xe9, 0xde, 0xcb,
+	0x48, 0xa9, 0xf6, 0x77, 0x2d, 0x98, 0x5a, 0x4f, 0x84, 0x94, 0xbe, 0xc4, 0x3c, 0x22, 0x32, 0x6c,
+	0x54, 0x75, 0x56, 0x8a, 0x05, 0xf4, 0x89, 0xeb, 0x70, 0xff, 0xd4, 0x82, 0x38, 0x16, 0xdf, 0x31,
+	0x88, 0xdc, 0x8b, 0x86, 0xc8, 0x9d, 0x79, 0x7d, 0x51, 0xdd, 0xc9, 0x93, 0xb8, 0xd1, 0x4d, 0x35,
+	0x27, 0x05, 0x37, 0x97, 0x98, 0x0c, 0xdf, 0x67, 0x93, 0xe6, 0xc4, 0xa9, 0xd9, 0xf8, 0xfd, 0x12,
+	0x20, 0x85, 0xdb, 0x77, 0x14, 0xdd, 0x74, 0x8d, 0x27, 0x13, 0x45, 0x77, 0x17, 0x10, 0xf3, 0xe9,
+	0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0xa1, 0xb5, 0x3e, 0x9a, 0xc3, 0x90, 0x72, 0x89, 0xbd, 0x95,
+	0xa2, 0x86, 0x33, 0x5a, 0xd0, 0x7c, 0xb5, 0x06, 0xfb, 0xf5, 0xd5, 0x1a, 0xea, 0xf1, 0xe8, 0xfe,
+	0x57, 0x2c, 0x98, 0x50, 0xc3, 0xf4, 0x09, 0x79, 0xba, 0xa3, 0xfa, 0x93, 0x73, 0xae, 0xd4, 0xb4,
+	0x2e, 0x33, 0x61, 0xe0, 0x47, 0x58, 0xf0, 0x04, 0xa7, 0xe5, 0x3e, 0x22, 0x2a, 0xd8, 0x7b, 0x55,
+	0x04, 0x43, 0x10, 0xa5, 0x87, 0xfb, 0xd5, 0x09, 0xf5, 0x8f, 0xfb, 0x23, 0xc4, 0x55, 0xec, 0xbf,
+	0x4d, 0x37, 0xbb, 0xb9, 0x14, 0xd1, 0x9b, 0x30, 0xd8, 0xd9, 0x76, 0x42, 0x92, 0x78, 0xe2, 0x38,
+	0x58, 0xa3, 0x85, 0x87, 0xfb, 0xd5, 0x49, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7f, 0x6c, 0xe2,
+	0xf4, 0xe2, 0xec, 0x19, 0x9b, 0xf8, 0xdf, 0x59, 0x30, 0xb0, 0x4e, 0x4f, 0xaf, 0xa7, 0xcf, 0x02,
+	0xde, 0x35, 0x58, 0xc0, 0xb9, 0xbc, 0xb4, 0x67, 0xb9, 0xbb, 0x7f, 0x25, 0xb1, 0xfb, 0x2f, 0xe4,
+	0x52, 0x28, 0xde, 0xf8, 0x6d, 0x18, 0x63, 0xc9, 0xd4, 0xc4, 0x73, 0xce, 0xd7, 0x8d, 0x0d, 0x5f,
+	0x4d, 0x6c, 0xf8, 0x29, 0x0d, 0x55, 0xdb, 0xe9, 0x2f, 0xc1, 0xb0, 0x78, 0x1f, 0x98, 0x8c, 0x41,
+	0x21, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x57, 0x06, 0x23, 0x79, 0x1b, 0xfa, 0x27, 0x16, 0xcc, 0x05,
+	0xdc, 0xc5, 0xbf, 0xb9, 0xd4, 0x0d, 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x6e, 0xcb, 0xf5,
+	0xb6, 0x56, 0xb7, 0x3c, 0x5f, 0x15, 0x2f, 0x3f, 0x24, 0x8d, 0xae, 0x8a, 0xdb, 0x53, 0x90, 0x29,
+	0x4e, 0x3d, 0x93, 0x79, 0xed, 0x60, 0xbf, 0x3a, 0x87, 0x8f, 0x44, 0x1b, 0x1f, 0xb1, 0x2f, 0xe8,
+	0x9f, 0x5b, 0x70, 0x85, 0x27, 0x11, 0xeb, 0xbf, 0xff, 0x05, 0x1a, 0x8e, 0x9a, 0x24, 0x15, 0x13,
+	0xd9, 0x20, 0x41, 0x7b, 0xe1, 0x0b, 0x62, 0x40, 0xaf, 0xd4, 0x8e, 0xd6, 0x16, 0x3e, 0x6a, 0xe7,
+	0xec, 0xff, 0xa6, 0x0c, 0x13, 0x22, 0x86, 0xad, 0x38, 0x03, 0xde, 0x34, 0x96, 0xc4, 0xb3, 0x89,
+	0x25, 0x31, 0x63, 0x20, 0x3f, 0x19, 0xf6, 0x1f, 0xc2, 0x0c, 0x65, 0xce, 0x37, 0x88, 0x13, 0x44,
+	0xf7, 0x89, 0xc3, 0x5d, 0x30, 0xcb, 0x47, 0xe6, 0xfe, 0x4a, 0xb1, 0x7e, 0x2b, 0x49, 0x0c, 0xa7,
+	0xe9, 0xff, 0x30, 0x9d, 0x39, 0x1e, 0x4c, 0xa7, 0xc2, 0x10, 0x7f, 0x15, 0x46, 0xd5, 0xe3, 0x36,
+	0xc1, 0x74, 0x8a, 0xa3, 0x79, 0x27, 0x29, 0x70, 0xa5, 0x67, 0xfc, 0xb0, 0x32, 0x26, 0x67, 0xff,
+	0x72, 0xc9, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x38, 0x21, 0xcb, 0x30, 0xd0, 0x2c, 0xd2, 0x68,
+	0xa7, 0x9a, 0x61, 0x7e, 0x66, 0xf3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x0d, 0xee, 0xe8, 0xba, 0x4b,
+	0x8a, 0xd4, 0xd9, 0x29, 0x6a, 0x20, 0x5d, 0x61, 0x77, 0x09, 0x16, 0xf5, 0xd1, 0xd7, 0xb9, 0x27,
+	0xf2, 0x4d, 0xcf, 0x7f, 0xe0, 0x5d, 0xf7, 0x7d, 0x19, 0x04, 0xaa, 0x3f, 0x82, 0x33, 0xd2, 0xff,
+	0x58, 0x55, 0xc7, 0x26, 0xb5, 0xfe, 0xe2, 0xfa, 0xff, 0x28, 0xb0, 0xa4, 0x49, 0x66, 0x2c, 0x89,
+	0x10, 0x11, 0x98, 0x12, 0x01, 0x92, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xeb, 0xb7, 0x59, 0x3b, 0xb6,
+	0x00, 0xdd, 0x34, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x9b, 0x33, 0xe1, 0x15, 0xe2, 0x44, 0xdd, 0x80,
+	0x84, 0xe8, 0xcb, 0x50, 0x49, 0xdf, 0x8c, 0x85, 0x21, 0xc5, 0x62, 0xd2, 0xf3, 0xb9, 0x83, 0xfd,
+	0x6a, 0xa5, 0x9e, 0x83, 0x83, 0x73, 0x6b, 0xdb, 0x3f, 0x6f, 0x01, 0x7b, 0xc1, 0x7f, 0x0c, 0x92,
+	0xcf, 0x97, 0x4c, 0xc9, 0xa7, 0x92, 0x37, 0x9d, 0x39, 0x42, 0xcf, 0x1b, 0x7c, 0x0d, 0xd7, 0x02,
+	0xff, 0xe1, 0x9e, 0xf0, 0xfa, 0xea, 0x7d, 0x8d, 0xb3, 0xbf, 0x67, 0x01, 0xcb, 0x30, 0x86, 0xf9,
+	0xad, 0x5d, 0x1a, 0x38, 0x7a, 0x3b, 0x34, 0x7c, 0x19, 0x46, 0x36, 0xc5, 0xf0, 0x67, 0x28, 0x9d,
+	0x8c, 0x0e, 0x9b, 0xb4, 0xe5, 0xa4, 0x89, 0x97, 0xb8, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
+	0x82, 0xd9, 0xfc, 0x6a, 0xe8, 0x0e, 0x9c, 0x09, 0x48, 0xa3, 0x1b, 0x84, 0x74, 0x4b, 0x88, 0x0b,
+	0x90, 0x78, 0x01, 0xc6, 0xa7, 0xfa, 0x99, 0x83, 0xfd, 0xea, 0x19, 0x9c, 0x8d, 0x82, 0xf3, 0xea,
+	0xa2, 0xb7, 0x60, 0xb2, 0x1b, 0x72, 0xc9, 0x8f, 0x09, 0x5d, 0xa1, 0x08, 0x63, 0xcf, 0x1e, 0x49,
+	0xdd, 0x31, 0x20, 0x38, 0x81, 0x69, 0xff, 0x79, 0xbe, 0x1c, 0x95, 0xc7, 0x6b, 0x1b, 0x66, 0x3c,
+	0xed, 0x3f, 0x3d, 0x01, 0xe5, 0x55, 0xff, 0xf9, 0x5e, 0xa7, 0x3e, 0x3b, 0x2e, 0xb5, 0x18, 0x03,
+	0x09, 0x32, 0x38, 0x4d, 0xd9, 0xfe, 0x9b, 0x16, 0x9c, 0xd1, 0x11, 0xb5, 0x17, 0x87, 0xbd, 0xac,
+	0x80, 0x4b, 0x5a, 0x00, 0x3e, 0x7e, 0xcc, 0x5d, 0xce, 0x08, 0xc0, 0x77, 0x52, 0xa7, 0x5e, 0x18,
+	0x6d, 0x8f, 0xbf, 0x2d, 0xcd, 0x8a, 0xb6, 0xf7, 0x47, 0x16, 0x5f, 0x9f, 0x7a, 0xd7, 0xd1, 0x47,
+	0x30, 0xdd, 0x76, 0xa2, 0xc6, 0xf6, 0xf2, 0xc3, 0x4e, 0xc0, 0x8d, 0xbb, 0x72, 0x9c, 0x5e, 0xee,
+	0x35, 0x4e, 0xda, 0x47, 0xc6, 0xde, 0xe0, 0x6b, 0x09, 0x62, 0x38, 0x45, 0x1e, 0xdd, 0x87, 0x31,
+	0x56, 0xc6, 0xde, 0x62, 0x87, 0x45, 0xb2, 0x4c, 0x5e, 0x6b, 0xca, 0x39, 0x68, 0x2d, 0xa6, 0x83,
+	0x75, 0xa2, 0xf6, 0x2f, 0x95, 0x39, 0xd3, 0x60, 0x77, 0x8f, 0x97, 0x60, 0xb8, 0xe3, 0x37, 0x17,
+	0x57, 0x97, 0xb0, 0x98, 0x05, 0x75, 0xee, 0xd5, 0x78, 0x31, 0x96, 0x70, 0x74, 0x19, 0x46, 0xc4,
+	0x4f, 0x69, 0x8c, 0x67, 0x7b, 0x44, 0xe0, 0x85, 0x58, 0x41, 0xd1, 0x6b, 0x00, 0x9d, 0xc0, 0xdf,
+	0x75, 0x9b, 0x2c, 0xf6, 0x56, 0xd9, 0xf4, 0xeb, 0xab, 0x29, 0x08, 0xd6, 0xb0, 0xd0, 0xdb, 0x30,
+	0xd1, 0xf5, 0x42, 0x2e, 0x3f, 0x69, 0xc9, 0x38, 0x94, 0xc7, 0xd9, 0x1d, 0x1d, 0x88, 0x4d, 0x5c,
+	0x34, 0x0f, 0x43, 0x91, 0xc3, 0xfc, 0xd4, 0x06, 0xf3, 0x5f, 0x0c, 0x6c, 0x50, 0x0c, 0x3d, 0xed,
+	0x25, 0xad, 0x80, 0x45, 0x45, 0xf4, 0x55, 0x19, 0x16, 0x81, 0x9f, 0x44, 0xe2, 0xa9, 0x4e, 0x7f,
+	0xa7, 0x96, 0x16, 0x14, 0x41, 0x3c, 0x01, 0x32, 0x68, 0xa1, 0xb7, 0x00, 0xc8, 0xc3, 0x88, 0x04,
+	0x9e, 0xd3, 0x52, 0xde, 0xa5, 0x4a, 0x90, 0x59, 0xf2, 0xd7, 0xfd, 0xe8, 0x4e, 0x48, 0x96, 0x15,
+	0x06, 0xd6, 0xb0, 0xed, 0x9f, 0x18, 0x03, 0x88, 0x2f, 0x1a, 0xe8, 0x11, 0x8c, 0x34, 0x9c, 0x8e,
+	0xd3, 0xe0, 0x39, 0x9d, 0xcb, 0x79, 0x0f, 0xcb, 0xe3, 0x1a, 0x73, 0x8b, 0x02, 0x9d, 0x1b, 0x6f,
+	0x64, 0x3e, 0x83, 0x11, 0x59, 0xdc, 0xd3, 0x60, 0xa3, 0xda, 0x43, 0xdf, 0xb1, 0x60, 0x4c, 0xc4,
+	0xb6, 0x62, 0x33, 0x54, 0xca, 0xb7, 0xb7, 0x69, 0xed, 0xcf, 0xc7, 0x35, 0x78, 0x17, 0x5e, 0x97,
+	0x2b, 0x54, 0x83, 0xf4, 0xec, 0x85, 0xde, 0x30, 0xfa, 0x9c, 0xbc, 0xdb, 0x96, 0x8d, 0xa1, 0x54,
+	0x77, 0xdb, 0x51, 0x76, 0xd4, 0xe8, 0xd7, 0xda, 0x3b, 0xc6, 0xb5, 0x76, 0x20, 0xff, 0x89, 0xb6,
+	0x21, 0x6f, 0xf7, 0xba, 0xd1, 0xa2, 0x9a, 0x1e, 0x03, 0x66, 0x30, 0xff, 0x85, 0xaf, 0x76, 0xb1,
+	0xeb, 0x11, 0xff, 0xe5, 0x5b, 0x30, 0xd5, 0x34, 0xa5, 0x16, 0xb1, 0x12, 0x5f, 0xcc, 0xa3, 0x9b,
+	0x10, 0x72, 0x62, 0x39, 0x25, 0x01, 0xc0, 0x49, 0xc2, 0xa8, 0xc6, 0x43, 0x02, 0xad, 0x7a, 0x9b,
+	0xbe, 0x78, 0x2e, 0x66, 0xe7, 0xce, 0xe5, 0x5e, 0x18, 0x91, 0x36, 0xc5, 0x8c, 0x85, 0x84, 0x75,
+	0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x7d, 0x18, 0x62, 0x4f, 0x3c, 0xc3, 0xca, 0x48, 0xbe, 0x59, 0xc3,
+	0x8c, 0x2e, 0x1c, 0x6f, 0x48, 0xf6, 0x37, 0xc4, 0x82, 0x02, 0xba, 0x21, 0x1f, 0x50, 0x87, 0xab,
+	0xde, 0x9d, 0x90, 0xb0, 0x07, 0xd4, 0xa3, 0x0b, 0xcf, 0xc7, 0x6f, 0xa3, 0x79, 0x79, 0x66, 0x72,
+	0x6c, 0xa3, 0x26, 0x15, 0xfb, 0xc4, 0x7f, 0x99, 0x73, 0x5b, 0x44, 0xea, 0xcb, 0xec, 0x9e, 0x99,
+	0x97, 0x3b, 0x1e, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa4, 0x49, 0x45, 0x68, 0xbe, 0xeb, 0xc5, 0x83,
+	0xb3, 0x5e, 0xbc, 0x83, 0x6b, 0x0e, 0xd8, 0x69, 0xc4, 0x4b, 0xb0, 0xa8, 0x8f, 0x5c, 0x98, 0x0a,
+	0x0c, 0xf1, 0x42, 0x06, 0xd8, 0xbb, 0xd4, 0x9f, 0x10, 0xa3, 0x65, 0x19, 0x31, 0xc9, 0xe0, 0x24,
+	0x5d, 0xf4, 0xbe, 0x26, 0x28, 0x4d, 0x14, 0xdf, 0xfc, 0x7b, 0x89, 0x46, 0xb3, 0x3b, 0x30, 0x61,
+	0x30, 0x9b, 0xa7, 0x6a, 0x82, 0xf4, 0x60, 0x3a, 0xc9, 0x59, 0x9e, 0xaa, 0xe5, 0xf1, 0x2d, 0x98,
+	0x64, 0x1b, 0xe1, 0x81, 0xd3, 0x11, 0xac, 0xf8, 0xb2, 0xc1, 0x8a, 0xad, 0xcb, 0x65, 0x3e, 0x30,
+	0x72, 0x08, 0x62, 0xc6, 0x69, 0xff, 0x9d, 0x41, 0x51, 0x59, 0xed, 0x22, 0x74, 0x05, 0x46, 0x45,
+	0x07, 0x54, 0xaa, 0x3e, 0xc5, 0x18, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x43, 0x23, 0xab, 0xae,
+	0xbd, 0x50, 0x88, 0x33, 0x34, 0x2a, 0x08, 0xd6, 0xb0, 0xe8, 0xe5, 0xf7, 0xbe, 0xef, 0x47, 0xea,
+	0x0c, 0x56, 0x5b, 0x6d, 0x81, 0x95, 0x62, 0x01, 0xa5, 0x67, 0xef, 0x0e, 0x09, 0x3c, 0xd2, 0x32,
+	0x73, 0xd5, 0xa8, 0xb3, 0xf7, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0x95, 0x20, 0xfc, 0x90, 0xed, 0x5d,
+	0x71, 0xc5, 0x8e, 0x5f, 0x7c, 0xd4, 0x79, 0x90, 0x0f, 0x09, 0x47, 0x5f, 0x81, 0x33, 0x2a, 0xd8,
+	0xa6, 0x58, 0x99, 0xb2, 0xc5, 0x21, 0x43, 0x23, 0x76, 0x66, 0x31, 0x1b, 0x0d, 0xe7, 0xd5, 0x47,
+	0xef, 0xc2, 0xa4, 0xb8, 0x86, 0x49, 0x8a, 0xc3, 0xa6, 0xfb, 0xe2, 0x4d, 0x03, 0x8a, 0x13, 0xd8,
+	0x32, 0xdb, 0x0e, 0xbb, 0x9f, 0x48, 0x0a, 0x23, 0xe9, 0x6c, 0x3b, 0x3a, 0x1c, 0xa7, 0x6a, 0xa0,
+	0x79, 0x98, 0xe2, 0x62, 0xa7, 0xeb, 0x6d, 0xf1, 0x39, 0x11, 0x4f, 0x60, 0xd5, 0x86, 0xbc, 0x6d,
+	0x82, 0x71, 0x12, 0x1f, 0x5d, 0x83, 0x71, 0x27, 0x68, 0x6c, 0xbb, 0x11, 0x69, 0xd0, 0x5d, 0xc5,
+	0x3c, 0x08, 0x35, 0xff, 0xcf, 0x79, 0x0d, 0x86, 0x0d, 0x4c, 0xf4, 0x1e, 0x0c, 0x84, 0x0f, 0x9c,
+	0x8e, 0xe0, 0x3e, 0xf9, 0xac, 0x5c, 0xad, 0x60, 0xee, 0xfa, 0x45, 0xff, 0x63, 0x56, 0xd3, 0x7e,
+	0x04, 0x27, 0x32, 0x82, 0x12, 0xd1, 0xa5, 0xe7, 0x74, 0x5c, 0x39, 0x2a, 0x89, 0x67, 0x1a, 0xf3,
+	0xb5, 0x55, 0x39, 0x1e, 0x1a, 0x16, 0x5d, 0xdf, 0x2c, 0x78, 0x51, 0x2d, 0x36, 0x24, 0xa9, 0xf5,
+	0xbd, 0x22, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0xa4, 0x04, 0x53, 0x19, 0xe6, 0x41, 0x96, 0x1b, 0x3f,
+	0x71, 0xcf, 0x8b, 0x53, 0xe1, 0x9b, 0xe9, 0x9f, 0x4a, 0x47, 0x48, 0xff, 0x54, 0xee, 0x95, 0xfe,
+	0x69, 0xe0, 0xe3, 0xa4, 0x7f, 0x32, 0x47, 0x6c, 0xb0, 0xaf, 0x11, 0xcb, 0x48, 0x19, 0x35, 0x74,
+	0xc4, 0x94, 0x51, 0xc6, 0xa0, 0x0f, 0xf7, 0x31, 0xe8, 0xff, 0x69, 0x09, 0xa6, 0x93, 0x96, 0xc5,
+	0x63, 0xd0, 0xce, 0xbf, 0x6f, 0x68, 0xe7, 0x2f, 0xf7, 0x13, 0xf4, 0x20, 0x57, 0x53, 0x8f, 0x13,
+	0x9a, 0xfa, 0xcf, 0xf6, 0x45, 0xad, 0x58, 0x6b, 0xff, 0xb7, 0x4a, 0x70, 0x2a, 0xd3, 0xe0, 0x7a,
+	0x0c, 0x63, 0x73, 0xdb, 0x18, 0x9b, 0x57, 0xfb, 0x0e, 0x08, 0x91, 0x3b, 0x40, 0xf7, 0x12, 0x03,
+	0x74, 0xa5, 0x7f, 0x92, 0xc5, 0xa3, 0xf4, 0xfd, 0x32, 0x5c, 0xc8, 0xac, 0x17, 0x2b, 0xb7, 0x57,
+	0x0c, 0xe5, 0xf6, 0x6b, 0x09, 0xe5, 0xb6, 0x5d, 0x5c, 0xfb, 0xc9, 0x68, 0xbb, 0x45, 0x60, 0x04,
+	0x16, 0xde, 0xe5, 0x31, 0x35, 0xdd, 0x46, 0x60, 0x04, 0x45, 0x08, 0x9b, 0x74, 0x7f, 0x98, 0x34,
+	0xdc, 0xff, 0x83, 0x05, 0x67, 0x33, 0xe7, 0xe6, 0x18, 0xf4, 0x8c, 0xeb, 0xa6, 0x9e, 0xf1, 0xa5,
+	0xbe, 0x57, 0x6b, 0x8e, 0xe2, 0xf1, 0xbb, 0x43, 0x39, 0xdf, 0xc2, 0xd4, 0x1f, 0xb7, 0x61, 0xcc,
+	0x69, 0x34, 0x48, 0x18, 0xae, 0xb1, 0x54, 0x13, 0xdc, 0xf6, 0xfa, 0x2a, 0xbb, 0x9c, 0xc6, 0xc5,
+	0x87, 0xfb, 0xd5, 0xd9, 0x24, 0x89, 0x18, 0x8c, 0x75, 0x0a, 0xe8, 0xeb, 0x30, 0x12, 0xca, 0x24,
+	0xbf, 0x03, 0x8f, 0x9f, 0xe4, 0x97, 0x49, 0x92, 0x4a, 0xbd, 0xa3, 0x48, 0xa2, 0x3f, 0xa7, 0x87,
+	0xf7, 0x2a, 0x50, 0x6c, 0xf2, 0x4e, 0x3e, 0x46, 0x90, 0x2f, 0xf3, 0x39, 0x7c, 0xb9, 0xaf, 0xe7,
+	0xf0, 0xef, 0xc1, 0x74, 0xc8, 0xc3, 0xe5, 0xc6, 0x2e, 0x32, 0x7c, 0x2d, 0xb2, 0x88, 0x83, 0xf5,
+	0x04, 0x0c, 0xa7, 0xb0, 0xd1, 0x8a, 0x6c, 0x95, 0x39, 0x43, 0xf1, 0xe5, 0x79, 0x29, 0x6e, 0x51,
+	0x38, 0x44, 0x9d, 0x4c, 0x4e, 0x02, 0x1b, 0x7e, 0xad, 0x26, 0xfa, 0x3a, 0x00, 0x5d, 0x44, 0x42,
+	0x85, 0x33, 0x9c, 0xcf, 0x42, 0x29, 0x6f, 0x69, 0x66, 0xbe, 0xc0, 0x60, 0x11, 0x0d, 0x96, 0x14,
+	0x11, 0xac, 0x11, 0x44, 0x0e, 0x4c, 0xc4, 0xff, 0x30, 0xd9, 0x2c, 0x0a, 0xb0, 0xc6, 0x5a, 0x48,
+	0x12, 0x67, 0xe6, 0x8d, 0x25, 0x9d, 0x04, 0x36, 0x29, 0xa2, 0xaf, 0xc1, 0xd9, 0xdd, 0x5c, 0xbf,
+	0x23, 0x2e, 0x4b, 0x9e, 0x3f, 0xd8, 0xaf, 0x9e, 0xcd, 0xf7, 0x36, 0xca, 0xaf, 0x6f, 0xff, 0x8f,
+	0x00, 0xcf, 0x14, 0x70, 0x7a, 0x34, 0x6f, 0xfa, 0x0c, 0xbc, 0x9c, 0xd4, 0xab, 0xcc, 0x66, 0x56,
+	0x36, 0x14, 0x2d, 0x89, 0x0d, 0x55, 0xfa, 0xd8, 0x1b, 0xea, 0xa7, 0x2c, 0xed, 0x9a, 0xc5, 0x3d,
+	0xca, 0xbf, 0x74, 0xc4, 0x13, 0xec, 0x09, 0xaa, 0xc0, 0x36, 0x33, 0xf4, 0x48, 0xaf, 0xf5, 0xdd,
+	0x9d, 0xfe, 0x15, 0x4b, 0xbf, 0x9a, 0x9d, 0x60, 0x80, 0xab, 0x98, 0xae, 0x1f, 0xf5, 0xfb, 0x8f,
+	0x2b, 0xd9, 0xc0, 0xef, 0x5b, 0x70, 0x36, 0x55, 0xcc, 0xfb, 0x40, 0x42, 0x11, 0xce, 0x70, 0xfd,
+	0x63, 0x77, 0x5e, 0x12, 0xe4, 0xdf, 0x70, 0x43, 0x7c, 0xc3, 0xd9, 0x5c, 0xbc, 0x64, 0xd7, 0x7f,
+	0xf2, 0x5f, 0x55, 0x4f, 0xb0, 0x06, 0x4c, 0x44, 0x9c, 0xdf, 0x75, 0xd4, 0x81, 0x8b, 0x8d, 0x6e,
+	0x10, 0xc4, 0x8b, 0x35, 0x63, 0x73, 0xf2, 0xdb, 0xe2, 0xf3, 0x07, 0xfb, 0xd5, 0x8b, 0x8b, 0x3d,
+	0x70, 0x71, 0x4f, 0x6a, 0xc8, 0x03, 0xd4, 0x4e, 0x79, 0xf7, 0x31, 0x06, 0x90, 0xa3, 0x05, 0x4a,
+	0xfb, 0x02, 0x72, 0x3f, 0xdd, 0x0c, 0x1f, 0xc1, 0x0c, 0xca, 0xc7, 0xab, 0xbb, 0xf9, 0xc1, 0x64,
+	0x33, 0x98, 0xbd, 0x05, 0x17, 0x8a, 0x17, 0xd3, 0x91, 0x42, 0x50, 0xfc, 0x9e, 0x05, 0xe7, 0x0b,
+	0x43, 0xb3, 0xfd, 0x19, 0xbc, 0x2c, 0xd8, 0xdf, 0xb6, 0xe0, 0xd9, 0xcc, 0x1a, 0xc9, 0xc7, 0x83,
+	0x0d, 0x5a, 0xa8, 0x39, 0xc3, 0xc6, 0x41, 0x8a, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xbf, 0x68, 0xa9,
+	0xa7, 0xbf, 0xe8, 0x3f, 0xb5, 0x20, 0x75, 0xd4, 0x1f, 0x83, 0xe4, 0xb9, 0x6a, 0x4a, 0x9e, 0xcf,
+	0xf7, 0x33, 0x9a, 0x39, 0x42, 0xe7, 0xbf, 0x9d, 0x82, 0xd3, 0x39, 0x2f, 0xc8, 0x77, 0x61, 0x66,
+	0xab, 0x41, 0xcc, 0x90, 0x21, 0x45, 0xd1, 0xff, 0x0a, 0xe3, 0x8b, 0x2c, 0x9c, 0x3a, 0xd8, 0xaf,
+	0xce, 0xa4, 0x50, 0x70, 0xba, 0x09, 0xf4, 0x6d, 0x0b, 0x4e, 0x3a, 0x0f, 0xc2, 0x65, 0x7a, 0x83,
+	0x70, 0x1b, 0x0b, 0x2d, 0xbf, 0xb1, 0x43, 0x05, 0x33, 0xb9, 0xad, 0xde, 0xc8, 0x54, 0x85, 0xdf,
+	0xab, 0xa7, 0xf0, 0x8d, 0xe6, 0x2b, 0x07, 0xfb, 0xd5, 0x93, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84,
+	0x45, 0x0e, 0x3f, 0x27, 0xda, 0x2e, 0x0a, 0x6a, 0x93, 0xf5, 0xd4, 0x9f, 0x8b, 0xc4, 0x12, 0x82,
+	0x15, 0x1d, 0xf4, 0x4d, 0x18, 0xdd, 0x92, 0xf1, 0x2b, 0x32, 0x44, 0xee, 0x78, 0x20, 0x8b, 0xa3,
+	0x7a, 0x70, 0x07, 0x1c, 0x85, 0x84, 0x63, 0xa2, 0xe8, 0x5d, 0x28, 0x7b, 0x9b, 0x61, 0x51, 0x08,
+	0xe9, 0x84, 0xa7, 0x35, 0x8f, 0x76, 0xb5, 0xbe, 0x52, 0xc7, 0xb4, 0x22, 0xba, 0x01, 0xe5, 0xe0,
+	0x7e, 0x53, 0xd8, 0x71, 0x32, 0x37, 0x29, 0x5e, 0x58, 0xca, 0xe9, 0x15, 0xa3, 0x84, 0x17, 0x96,
+	0x30, 0x25, 0x81, 0x6a, 0x30, 0xc8, 0x9e, 0x5d, 0x0b, 0xd1, 0x36, 0xf3, 0x2a, 0x5f, 0x10, 0xbe,
+	0x80, 0xbf, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x1b, 0x30, 0xd4, 0x70, 0xbd, 0x26, 0x09, 0x84,
+	0x2c, 0xfb, 0xb9, 0x4c, 0x8b, 0x0d, 0xc3, 0xc8, 0xa1, 0xc9, 0x0d, 0x18, 0x0c, 0x03, 0x0b, 0x5a,
+	0x8c, 0x2a, 0xe9, 0x6c, 0x6f, 0xca, 0x13, 0x2b, 0x9b, 0x2a, 0xe9, 0x6c, 0xaf, 0xd4, 0x0b, 0xa9,
+	0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xb7, 0xa0, 0xb4, 0xd9, 0x10, 0x4f, 0xaa, 0x33, 0xd5, 0x9b, 0x66,
+	0xc0, 0xb2, 0x85, 0xa1, 0x83, 0xfd, 0x6a, 0x69, 0x65, 0x11, 0x97, 0x36, 0x1b, 0x68, 0x1d, 0x86,
+	0x37, 0x79, 0xbc, 0x20, 0xa1, 0x1f, 0x7d, 0x31, 0x3b, 0x94, 0x51, 0x2a, 0xa4, 0x10, 0x7f, 0xdb,
+	0x2a, 0x00, 0x58, 0x12, 0x61, 0x09, 0xcf, 0x54, 0xdc, 0x23, 0x11, 0x29, 0x76, 0xee, 0x68, 0xb1,
+	0xaa, 0x44, 0xa0, 0x71, 0x45, 0x05, 0x6b, 0x14, 0xe9, 0xaa, 0x76, 0x1e, 0x75, 0x03, 0x96, 0x11,
+	0x45, 0x18, 0x66, 0x32, 0x57, 0xf5, 0xbc, 0x44, 0x2a, 0x5a, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1,
+	0x0e, 0x4c, 0xec, 0x86, 0x9d, 0x6d, 0x22, 0xb7, 0x34, 0x8b, 0x30, 0x98, 0x23, 0xcd, 0xde, 0x15,
+	0x88, 0x6e, 0x10, 0x75, 0x9d, 0x56, 0x8a, 0x0b, 0xb1, 0x6b, 0xcd, 0x5d, 0x9d, 0x18, 0x36, 0x69,
+	0xd3, 0xe1, 0xff, 0xa8, 0xeb, 0xdf, 0xdf, 0x8b, 0x88, 0x08, 0xf0, 0x9a, 0x39, 0xfc, 0x1f, 0x70,
+	0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5d, 0x31, 0x3c, 0x8c, 0x7b, 0x4e, 0xe7, 0x07,
+	0xc2, 0x9f, 0x97, 0x48, 0x39, 0x83, 0xc2, 0xb8, 0x65, 0x4c, 0x8a, 0x71, 0xc9, 0xce, 0xb6, 0x1f,
+	0xf9, 0x5e, 0x82, 0x43, 0xcf, 0xe4, 0x73, 0xc9, 0x5a, 0x06, 0x7e, 0x9a, 0x4b, 0x66, 0x61, 0xe1,
+	0xcc, 0xb6, 0x50, 0x13, 0x26, 0x3b, 0x7e, 0x10, 0x3d, 0xf0, 0x03, 0xb9, 0xbe, 0x50, 0x81, 0xa2,
+	0xd4, 0xc0, 0x14, 0x2d, 0x32, 0xb7, 0x20, 0x13, 0x82, 0x13, 0x34, 0xd1, 0x97, 0x61, 0x38, 0x6c,
+	0x38, 0x2d, 0xb2, 0x7a, 0xbb, 0x72, 0x22, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0xc3,
+	0x3d, 0x71, 0x14, 0x2c, 0xc9, 0xa1, 0x15, 0x18, 0x64, 0x39, 0xef, 0x59, 0x34, 0xe2, 0x9c, 0x78,
+	0xfe, 0xa9, 0x47, 0x3d, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x34, 0x05, 0x7e,
+	0x58, 0x39, 0x95, 0xbf, 0x07, 0x84, 0x82, 0xe1, 0x76, 0xbd, 0x68, 0x0f, 0x28, 0x24, 0x1c, 0x13,
+	0xa5, 0x9c, 0x99, 0x72, 0xd3, 0xd3, 0x05, 0x0e, 0x9b, 0xb9, 0xbc, 0x94, 0x71, 0x66, 0xca, 0x49,
+	0x29, 0x09, 0xfb, 0x37, 0x47, 0xd2, 0x32, 0x0b, 0xd3, 0x30, 0xfd, 0xc7, 0x56, 0xca, 0x63, 0xe3,
+	0xf3, 0xfd, 0x2a, 0xbc, 0x9f, 0xe0, 0xc5, 0xf5, 0xdb, 0x16, 0x9c, 0xee, 0x64, 0x7e, 0x88, 0x10,
+	0x00, 0xfa, 0xd3, 0x9b, 0xf3, 0x4f, 0x57, 0x91, 0xab, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0xa4, 0x72,
+	0xa0, 0xfc, 0xb1, 0x95, 0x03, 0x6b, 0x30, 0xd2, 0xe0, 0x37, 0x39, 0x99, 0x3c, 0xa2, 0xaf, 0xb8,
+	0xab, 0xdc, 0x4e, 0x2b, 0x2a, 0x62, 0x45, 0x02, 0xfd, 0xb4, 0x05, 0xe7, 0x93, 0x5d, 0xc7, 0x84,
+	0x81, 0x85, 0xbb, 0x26, 0x57, 0x6b, 0xad, 0x88, 0xef, 0x4f, 0xc9, 0xff, 0x06, 0xf2, 0x61, 0x2f,
+	0x04, 0x5c, 0xdc, 0x18, 0x5a, 0xca, 0xd0, 0xab, 0x0d, 0x99, 0x36, 0xc9, 0x3e, 0x74, 0x6b, 0x6f,
+	0xc0, 0x78, 0xdb, 0xef, 0x7a, 0x91, 0xf0, 0xba, 0x14, 0xae, 0x5b, 0xcc, 0x65, 0x69, 0x4d, 0x2b,
+	0xc7, 0x06, 0x56, 0x42, 0x23, 0x37, 0xf2, 0xd8, 0x1a, 0xb9, 0x0f, 0x61, 0xdc, 0xd3, 0x1e, 0x24,
+	0x14, 0xdd, 0x60, 0x85, 0x76, 0x51, 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0x58, 0x5b,
+	0x06, 0x1f, 0x4f, 0x5b, 0x76, 0xac, 0x57, 0x62, 0xfb, 0xef, 0x95, 0x32, 0x6e, 0x0c, 0x5c, 0x2b,
+	0xf7, 0x8e, 0xa9, 0x95, 0xbb, 0x94, 0xd4, 0xca, 0xa5, 0x4c, 0x55, 0x86, 0x42, 0xae, 0xff, 0x0c,
+	0xa6, 0x7d, 0xc7, 0xd2, 0xfe, 0x0b, 0x16, 0x9c, 0x61, 0xb6, 0x0f, 0xda, 0xc0, 0xc7, 0xb6, 0x77,
+	0x30, 0x87, 0xd8, 0x5b, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x0b, 0x2e, 0xf6, 0x3a, 0x77, 0x99,
+	0x7f, 0x71, 0x53, 0xb9, 0x57, 0xc4, 0xfe, 0xc5, 0xcd, 0xd5, 0x25, 0xcc, 0x20, 0xfd, 0x86, 0x5d,
+	0xb4, 0xff, 0x4f, 0x0b, 0xca, 0x35, 0xbf, 0x79, 0x0c, 0x37, 0xfa, 0x2f, 0x19, 0x37, 0xfa, 0x67,
+	0xb2, 0x4f, 0xfc, 0x66, 0xae, 0xb1, 0x6f, 0x39, 0x61, 0xec, 0x3b, 0x9f, 0x47, 0xa0, 0xd8, 0xb4,
+	0xf7, 0xb7, 0xcb, 0x30, 0x56, 0xf3, 0x9b, 0x6a, 0x9f, 0xfd, 0x77, 0x8f, 0xf3, 0x8c, 0x28, 0x37,
+	0x67, 0x99, 0x46, 0x99, 0xf9, 0x13, 0xcb, 0xa8, 0x17, 0x7f, 0xc6, 0x5e, 0x13, 0xdd, 0x23, 0xee,
+	0xd6, 0x76, 0x44, 0x9a, 0xc9, 0xcf, 0x39, 0xbe, 0xd7, 0x44, 0x7f, 0x58, 0x86, 0xa9, 0x44, 0xeb,
+	0xa8, 0x05, 0x13, 0x2d, 0xdd, 0x94, 0x24, 0xd6, 0xe9, 0x63, 0x59, 0xa1, 0xc4, 0x6b, 0x0c, 0xad,
+	0x08, 0x9b, 0xc4, 0xd1, 0x1c, 0x80, 0xa7, 0xfb, 0xa4, 0xab, 0x98, 0xd0, 0x9a, 0x3f, 0xba, 0x86,
+	0x81, 0xde, 0x84, 0xb1, 0xc8, 0xef, 0xf8, 0x2d, 0x7f, 0x6b, 0xef, 0xa6, 0x8a, 0x8f, 0xac, 0x5c,
+	0x96, 0x37, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x43, 0x98, 0x51, 0x44, 0xea, 0x4f, 0xc0, 0xbc, 0xc6,
+	0xd4, 0x26, 0xeb, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x0b, 0x26, 0x99, 0xef, 0x34, 0xab, 0x7f,
+	0x93, 0xec, 0xc9, 0xe0, 0xd2, 0x4c, 0xc2, 0x5e, 0x33, 0x20, 0x38, 0x81, 0x89, 0x16, 0x61, 0xa6,
+	0xed, 0x86, 0x89, 0xea, 0x43, 0xac, 0x3a, 0xeb, 0xc0, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0x2f,
+	0x88, 0x39, 0xf6, 0x22, 0xf7, 0xd3, 0xed, 0xf8, 0xc9, 0xde, 0x8e, 0xdf, 0xb7, 0x60, 0x9a, 0xb6,
+	0xce, 0x1c, 0x42, 0xa5, 0x20, 0xa5, 0xd2, 0x8f, 0x58, 0x05, 0xe9, 0x47, 0x2e, 0x51, 0xb6, 0xdd,
+	0xf4, 0xbb, 0x91, 0xd0, 0x8e, 0x6a, 0x7c, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0xc4,
+	0xab, 0x7b, 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x99, 0x9d, 0x64, 0x20, 0x3b, 0x3b, 0x09, 0x0f,
+	0x32, 0x2f, 0xfc, 0xe8, 0x84, 0x48, 0xab, 0x05, 0x99, 0x97, 0x0e, 0x76, 0x31, 0x8e, 0xfd, 0xd7,
+	0xca, 0x50, 0xa9, 0xf9, 0xcd, 0x45, 0x12, 0x44, 0xee, 0xa6, 0xdb, 0x70, 0x22, 0xa2, 0xe5, 0xdb,
+	0x7d, 0x0d, 0x80, 0x3d, 0x22, 0x0b, 0xb2, 0x22, 0xa8, 0xd7, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x2a,
+	0xd9, 0x21, 0x7b, 0xda, 0xc9, 0xab, 0xa4, 0x92, 0x9b, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0xc5, 0x42,
+	0x19, 0x2d, 0x3f, 0xec, 0xb8, 0x01, 0xcf, 0x4c, 0x4e, 0x1a, 0xbe, 0xd7, 0x0c, 0x45, 0xe0, 0xb7,
+	0x8a, 0x08, 0x44, 0x94, 0x82, 0xe3, 0xcc, 0x5a, 0xa8, 0x06, 0x27, 0x1b, 0x01, 0x69, 0x12, 0x2f,
+	0x72, 0x9d, 0xd6, 0x42, 0xd7, 0x6b, 0xb6, 0x78, 0x4a, 0x9e, 0x01, 0x23, 0x83, 0xe8, 0xc9, 0xc5,
+	0x0c, 0x1c, 0x9c, 0x59, 0x53, 0x7c, 0x0a, 0x23, 0x32, 0x98, 0xfa, 0x14, 0x56, 0x4f, 0xc2, 0x59,
+	0xe3, 0xf1, 0x10, 0x2e, 0x6e, 0x3b, 0xae, 0xc7, 0xea, 0x0d, 0x25, 0x1a, 0xcf, 0xc0, 0xc1, 0x99,
+	0x35, 0xed, 0x3f, 0x2d, 0xc3, 0x38, 0x9d, 0x18, 0xe5, 0x71, 0xf3, 0x86, 0xe1, 0x71, 0x73, 0x31,
+	0xe1, 0x71, 0x33, 0xad, 0xe3, 0x6a, 0xfe, 0x35, 0xef, 0x03, 0xf2, 0x45, 0x52, 0x82, 0xeb, 0xc4,
+	0x23, 0x7c, 0xc8, 0x98, 0x92, 0xb1, 0x1c, 0xfb, 0xa3, 0xdc, 0x4e, 0x61, 0xe0, 0x8c, 0x5a, 0x9f,
+	0xfa, 0xea, 0x1c, 0xaf, 0xaf, 0xce, 0x6f, 0x59, 0x6c, 0x05, 0x2c, 0xad, 0xd7, 0xb9, 0x13, 0x39,
+	0xba, 0x0a, 0x63, 0xec, 0x18, 0x63, 0xb1, 0x3c, 0xa4, 0x4b, 0x0b, 0xcb, 0x6e, 0xbb, 0x1e, 0x17,
+	0x63, 0x1d, 0x07, 0x5d, 0x86, 0x91, 0x90, 0x38, 0x41, 0x63, 0x5b, 0x9d, 0xe1, 0xc2, 0xff, 0x84,
+	0x97, 0x61, 0x05, 0x45, 0x1f, 0xc4, 0x11, 0xe1, 0xcb, 0xf9, 0x1e, 0xe9, 0x7a, 0x7f, 0x38, 0x1f,
+	0xcc, 0x0f, 0x03, 0x6f, 0xdf, 0x03, 0x94, 0xc6, 0xef, 0xe3, 0x89, 0x5f, 0xd5, 0x8c, 0x59, 0x3c,
+	0x9a, 0x8a, 0x57, 0xfc, 0xef, 0x2d, 0x98, 0xac, 0xf9, 0x4d, 0xca, 0x9f, 0x7f, 0x98, 0x98, 0xb1,
+	0x9e, 0xc1, 0x63, 0xa8, 0x20, 0x83, 0xc7, 0x81, 0x05, 0x17, 0xd8, 0xe7, 0x47, 0xc4, 0x6b, 0xc6,
+	0x06, 0x4f, 0xdd, 0xdf, 0xe3, 0x01, 0x4c, 0x05, 0x3c, 0x7c, 0xd7, 0x9a, 0xd3, 0xe9, 0xb8, 0xde,
+	0x96, 0x7c, 0xdf, 0xf6, 0x46, 0xe1, 0xbb, 0x8d, 0x24, 0x49, 0x11, 0x02, 0x4c, 0x77, 0x54, 0x35,
+	0x88, 0xe2, 0x64, 0x2b, 0x3c, 0x2b, 0x8d, 0xd6, 0x1f, 0x2d, 0x41, 0xa5, 0x96, 0x95, 0x26, 0x81,
+	0x80, 0xd3, 0x75, 0xec, 0xe7, 0x60, 0xb0, 0xe6, 0x37, 0x7b, 0x04, 0x8f, 0xfe, 0x3b, 0x16, 0x0c,
+	0xd7, 0xfc, 0xe6, 0x31, 0x98, 0x10, 0xdf, 0x31, 0x4d, 0x88, 0x67, 0x72, 0x36, 0x47, 0x8e, 0xd5,
+	0xf0, 0x9f, 0x0d, 0xc0, 0x04, 0xed, 0xa7, 0xbf, 0x25, 0xd7, 0xab, 0xb1, 0x36, 0xac, 0x3e, 0xd6,
+	0x06, 0xbd, 0xd0, 0xfa, 0xad, 0x96, 0xff, 0x20, 0xb9, 0x76, 0x57, 0x58, 0x29, 0x16, 0x50, 0xf4,
+	0x0a, 0x8c, 0x74, 0x02, 0xb2, 0xeb, 0xfa, 0xe2, 0xa6, 0xa8, 0x19, 0x64, 0x6b, 0xa2, 0x1c, 0x2b,
+	0x0c, 0xf4, 0x06, 0x8c, 0x87, 0xae, 0x47, 0xa5, 0x62, 0x7e, 0xf4, 0x0e, 0xb0, 0x83, 0x81, 0xe7,
+	0xd2, 0xd3, 0xca, 0xb1, 0x81, 0x85, 0xee, 0xc1, 0x28, 0xfb, 0xcf, 0x78, 0xeb, 0xe0, 0x91, 0x79,
+	0xab, 0x48, 0x94, 0x2e, 0x08, 0xe0, 0x98, 0x16, 0x15, 0x38, 0x22, 0x99, 0x8f, 0x2a, 0x14, 0x41,
+	0x84, 0x95, 0xc0, 0xa1, 0x32, 0x55, 0x85, 0x58, 0xc3, 0x42, 0x2f, 0xc3, 0x68, 0xe4, 0xb8, 0xad,
+	0x5b, 0xae, 0xc7, 0x3c, 0x51, 0x68, 0xff, 0x45, 0xbe, 0x72, 0x51, 0x88, 0x63, 0x38, 0xbd, 0xd5,
+	0xb0, 0xd8, 0x6a, 0x0b, 0x7b, 0x91, 0xc8, 0xa2, 0x59, 0xe6, 0xb7, 0x9a, 0x5b, 0xaa, 0x14, 0x6b,
+	0x18, 0x68, 0x1b, 0xce, 0xb9, 0x1e, 0xcb, 0x3b, 0x47, 0xea, 0x3b, 0x6e, 0x67, 0xe3, 0x56, 0xfd,
+	0x2e, 0x09, 0xdc, 0xcd, 0xbd, 0x05, 0xa7, 0xb1, 0x43, 0xbc, 0x26, 0x53, 0x7a, 0x8d, 0x2c, 0x3c,
+	0x2f, 0xba, 0x78, 0x6e, 0xb5, 0x00, 0x17, 0x17, 0x52, 0x42, 0x36, 0xe5, 0x39, 0x01, 0x71, 0xda,
+	0x42, 0xbb, 0xc5, 0x73, 0x56, 0xb1, 0x12, 0x2c, 0x20, 0xf6, 0xeb, 0x6c, 0x4f, 0xdc, 0xae, 0xa3,
+	0xcf, 0x1a, 0x3c, 0xf4, 0xb4, 0xce, 0x43, 0x0f, 0xf7, 0xab, 0x43, 0xb7, 0xeb, 0x5a, 0x9c, 0xad,
+	0x6b, 0x70, 0xaa, 0xe6, 0x37, 0x6b, 0x7e, 0x10, 0xad, 0xf8, 0xc1, 0x03, 0x27, 0x68, 0xca, 0x25,
+	0x58, 0x95, 0x91, 0xc6, 0x28, 0x67, 0x18, 0xe4, 0x6c, 0xd6, 0x88, 0x22, 0xf6, 0x3a, 0xbb, 0x9f,
+	0x1c, 0xf1, 0x61, 0x77, 0x83, 0x49, 0xca, 0x2a, 0xbb, 0xe3, 0x75, 0x27, 0x22, 0xe8, 0x36, 0x4c,
+	0x34, 0x74, 0xd9, 0x44, 0x54, 0x7f, 0x49, 0x9e, 0xe8, 0x86, 0xe0, 0x92, 0x29, 0xcc, 0x98, 0xf5,
+	0xed, 0xdf, 0xb7, 0x44, 0x2b, 0x1a, 0xd7, 0xe8, 0xe3, 0x60, 0x59, 0xcc, 0x62, 0x4e, 0xfc, 0xa6,
+	0x7a, 0xaa, 0x5f, 0xc6, 0x84, 0xbe, 0x06, 0x67, 0x8d, 0x42, 0xe9, 0x14, 0xa2, 0xe5, 0xdf, 0x67,
+	0x9a, 0x49, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x06, 0xa7, 0x93, 0xdf, 0x25, 0x38, 0xfa,
+	0x63, 0x7e, 0x5d, 0xe9, 0x68, 0x5f, 0x67, 0xbf, 0x09, 0x33, 0x35, 0x5f, 0x8b, 0xa2, 0xc2, 0xe6,
+	0xaf, 0x77, 0x30, 0xb7, 0x5f, 0x1e, 0x61, 0x67, 0x7d, 0x22, 0x65, 0x23, 0xfa, 0x06, 0x4c, 0x86,
+	0x84, 0x45, 0x30, 0x94, 0x3a, 0xea, 0x82, 0xa8, 0x0c, 0xf5, 0x65, 0x1d, 0x93, 0xdf, 0xc3, 0xcd,
+	0x32, 0x9c, 0xa0, 0x86, 0xda, 0x30, 0xf9, 0xc0, 0xf5, 0x9a, 0xfe, 0x83, 0x50, 0xd2, 0x1f, 0xc9,
+	0x37, 0x78, 0xdd, 0xe3, 0x98, 0x89, 0x3e, 0x1a, 0xcd, 0xdd, 0x33, 0x88, 0xe1, 0x04, 0x71, 0xca,
+	0x6a, 0x82, 0xae, 0x37, 0x1f, 0xde, 0x09, 0x49, 0x20, 0xe2, 0x2b, 0x32, 0x56, 0x83, 0x65, 0x21,
+	0x8e, 0xe1, 0x94, 0xd5, 0xb0, 0x3f, 0x2c, 0xac, 0x03, 0xe3, 0x65, 0x82, 0xd5, 0x60, 0x55, 0x8a,
+	0x35, 0x0c, 0xca, 0x8a, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, 0xbe, 0x1f, 0x49, 0xe6, 0xcd, 0xb2, 0xea,
+	0x6a, 0xe5, 0xd8, 0xc0, 0xca, 0x89, 0xe6, 0x38, 0x70, 0xd4, 0x68, 0x8e, 0x28, 0x2a, 0x88, 0x64,
+	0xc1, 0xe3, 0x91, 0x5f, 0x2b, 0x8a, 0x64, 0x71, 0xf8, 0x58, 0x51, 0x2e, 0xa8, 0xc0, 0xb3, 0x29,
+	0x06, 0x68, 0x90, 0x87, 0xab, 0x64, 0x26, 0xf9, 0x3a, 0x1f, 0x1d, 0x09, 0x43, 0xcb, 0x30, 0x1c,
+	0xee, 0x85, 0x8d, 0xa8, 0x15, 0x16, 0x65, 0x4e, 0xae, 0x33, 0x94, 0x58, 0x1e, 0xe5, 0xff, 0x43,
+	0x2c, 0xeb, 0xa2, 0x06, 0x9c, 0x10, 0x14, 0x17, 0xb7, 0x1d, 0x4f, 0x65, 0x56, 0xe5, 0xbe, 0xb7,
+	0x57, 0x0f, 0xf6, 0xab, 0x27, 0x44, 0xcb, 0x3a, 0xf8, 0x70, 0xbf, 0x4a, 0xb7, 0x64, 0x06, 0x04,
+	0x67, 0x51, 0xe3, 0x4b, 0xbe, 0xd1, 0xf0, 0xdb, 0x9d, 0x5a, 0xe0, 0x6f, 0xba, 0x2d, 0x52, 0xe4,
+	0xd6, 0x50, 0x37, 0x30, 0xc5, 0x92, 0x37, 0xca, 0x70, 0x82, 0x1a, 0xba, 0x0f, 0x53, 0x4e, 0xa7,
+	0x33, 0x1f, 0xb4, 0xfd, 0x40, 0x36, 0x30, 0x96, 0x6f, 0x1f, 0x9b, 0x37, 0x51, 0x79, 0x62, 0xd5,
+	0x44, 0x21, 0x4e, 0x12, 0xa4, 0x03, 0x25, 0x36, 0x9a, 0x31, 0x50, 0x13, 0xf1, 0x40, 0x89, 0x7d,
+	0x99, 0x31, 0x50, 0x19, 0x10, 0x9c, 0x45, 0xcd, 0xfe, 0xf3, 0xec, 0x76, 0xc3, 0xa2, 0x9d, 0xb3,
+	0x47, 0x6e, 0x6d, 0x98, 0xe8, 0x30, 0xb6, 0x2f, 0x92, 0x1e, 0x0a, 0x56, 0xf1, 0x46, 0x9f, 0x6a,
+	0xf8, 0x07, 0x2c, 0xab, 0xb3, 0xe1, 0x8e, 0x5d, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xaf, 0x67,
+	0x99, 0xe8, 0x58, 0xe7, 0xba, 0xf5, 0x61, 0xf1, 0xe4, 0x57, 0x48, 0xc9, 0xb3, 0xf9, 0x56, 0xac,
+	0x78, 0x7d, 0x89, 0x67, 0xc3, 0x58, 0xd6, 0x45, 0x5f, 0x87, 0x49, 0xd7, 0x73, 0xe3, 0x24, 0xeb,
+	0x61, 0xe5, 0x64, 0x7e, 0x2c, 0x39, 0x85, 0xa5, 0x27, 0x44, 0xd5, 0x2b, 0xe3, 0x04, 0x31, 0xf4,
+	0x01, 0xf3, 0x50, 0x96, 0xa4, 0x4b, 0xfd, 0x90, 0xd6, 0x9d, 0x91, 0x25, 0x59, 0x8d, 0x08, 0xea,
+	0xc2, 0x89, 0x74, 0xb2, 0xf9, 0xb0, 0x62, 0xe7, 0x5f, 0x00, 0xd3, 0xf9, 0xe2, 0xe3, 0xcc, 0x95,
+	0x69, 0x58, 0x88, 0xb3, 0xe8, 0xa3, 0x5b, 0xc9, 0x54, 0xe0, 0x65, 0xc3, 0xfe, 0x95, 0x4a, 0x07,
+	0x3e, 0x51, 0x98, 0x05, 0x7c, 0x0b, 0xce, 0x6b, 0x79, 0x8d, 0xaf, 0x07, 0x0e, 0xf3, 0x90, 0x73,
+	0xd9, 0x69, 0xa4, 0x09, 0xb5, 0xcf, 0x1e, 0xec, 0x57, 0xcf, 0x6f, 0x14, 0x21, 0xe2, 0x62, 0x3a,
+	0xe8, 0x36, 0x9c, 0xe2, 0x91, 0x90, 0x96, 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x92, 0x9a, 0x39, 0xef,
+	0x3a, 0x7b, 0xb0, 0x5f, 0x3d, 0x35, 0x9f, 0x85, 0x80, 0xb3, 0xeb, 0xa1, 0x77, 0x60, 0xb4, 0xe9,
+	0x49, 0x2e, 0x3b, 0x64, 0xa4, 0x8e, 0x1e, 0x5d, 0x5a, 0xaf, 0xab, 0xef, 0x8f, 0xff, 0xe0, 0xb8,
+	0x02, 0xda, 0xe2, 0x06, 0x58, 0xa5, 0x35, 0x1f, 0x4e, 0x05, 0xc8, 0x4d, 0x1a, 0x96, 0x8c, 0xd0,
+	0x22, 0xdc, 0xf3, 0x40, 0x3d, 0x3f, 0x35, 0xa2, 0x8e, 0x18, 0x84, 0xd1, 0xfb, 0x80, 0x44, 0xbe,
+	0xaf, 0xf9, 0x06, 0xcb, 0xa8, 0xa9, 0x79, 0x45, 0x2b, 0x3d, 0x49, 0x3d, 0x85, 0x81, 0x33, 0x6a,
+	0xa1, 0x1b, 0x94, 0x3d, 0xea, 0xa5, 0x82, 0xfd, 0x4a, 0x7d, 0x56, 0x65, 0x89, 0x74, 0x02, 0xc2,
+	0x1c, 0x79, 0x4d, 0x8a, 0x38, 0x51, 0x0f, 0x35, 0xe1, 0x9c, 0xd3, 0x8d, 0x7c, 0x66, 0xdb, 0x36,
+	0x51, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0xb7, 0x92, 0x11, 0x16, 0x78, 0xf7, 0xdc, 0x7c, 0x01, 0x1e,
+	0x2e, 0xa4, 0x42, 0xaf, 0x53, 0x74, 0x2c, 0x34, 0xb3, 0xb3, 0x11, 0x25, 0x81, 0xfb, 0x62, 0x48,
+	0x0c, 0xf4, 0x26, 0x8c, 0x6d, 0xfb, 0x61, 0xb4, 0x4e, 0xa2, 0x07, 0x7e, 0xb0, 0x23, 0x12, 0x8c,
+	0xc4, 0x49, 0x9d, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x4b, 0x30, 0xcc, 0x9c, 0x1e, 0x57, 0x97, 0xd8,
+	0x59, 0x3b, 0x12, 0xf3, 0x98, 0x1b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0xad, 0x2d, 0x32, 0x76,
+	0x9c, 0x40, 0x5d, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x76, 0x02, 0x52, 0x0b, 0xfc,
+	0x06, 0x09, 0xb5, 0x54, 0x62, 0xcf, 0xf0, 0xf4, 0x29, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, 0x76,
+	0x3d, 0x44, 0xd2, 0x39, 0xbd, 0x27, 0xf3, 0x8d, 0xfe, 0x69, 0x71, 0xb0, 0xcf, 0xb4, 0xde, 0x1e,
+	0x4c, 0xab, 0x6c, 0xe2, 0x3c, 0x61, 0x4a, 0x58, 0x99, 0xca, 0xcf, 0xe9, 0x9f, 0xf9, 0xd6, 0x47,
+	0xb9, 0x51, 0xac, 0x26, 0x28, 0xe1, 0x14, 0x6d, 0x23, 0xb2, 0xf3, 0x74, 0xcf, 0xc8, 0xce, 0x57,
+	0x60, 0x34, 0xec, 0xde, 0x6f, 0xfa, 0x6d, 0xc7, 0xf5, 0x98, 0xef, 0x98, 0x76, 0x71, 0xaf, 0x4b,
+	0x00, 0x8e, 0x71, 0xd0, 0x0a, 0x8c, 0x38, 0xd2, 0x47, 0x02, 0xe5, 0x07, 0xad, 0x54, 0x9e, 0x11,
+	0x3c, 0x8e, 0x9b, 0xf4, 0x8a, 0x50, 0x75, 0xd1, 0xdb, 0x30, 0x21, 0x02, 0xe3, 0x08, 0x7d, 0xfc,
+	0x09, 0xf3, 0x29, 0x7f, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xdd, 0x81, 0xb1, 0xc8, 0x6f, 0x09, 0x45,
+	0x6e, 0x58, 0x39, 0x9d, 0x1f, 0x5b, 0x7a, 0x43, 0xa1, 0xe9, 0xd6, 0x3b, 0x55, 0x15, 0xeb, 0x74,
+	0xd0, 0x06, 0x5f, 0xef, 0x2c, 0x71, 0x18, 0x09, 0x2b, 0x67, 0xf2, 0xcf, 0x24, 0x95, 0x5f, 0xcc,
+	0xdc, 0x0e, 0xa2, 0x26, 0xd6, 0xc9, 0xa0, 0xeb, 0x30, 0xd3, 0x09, 0x5c, 0x9f, 0xad, 0x09, 0xe5,
+	0xf3, 0x51, 0x31, 0x75, 0x48, 0xb5, 0x24, 0x02, 0x4e, 0xd7, 0x61, 0x71, 0x8d, 0x44, 0x61, 0xe5,
+	0x2c, 0x4f, 0x75, 0xc8, 0xf5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x9c, 0x98, 0xeb, 0x29,
+	0x2b, 0xb3, 0xf9, 0xd1, 0x32, 0x74, 0x7d, 0x26, 0x97, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x4d,
+	0x98, 0x0c, 0xf4, 0x1b, 0x70, 0x58, 0x39, 0x57, 0xe0, 0x79, 0x9e, 0xb8, 0x2e, 0xc7, 0x02, 0x81,
+	0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1e, 0x4c, 0x8b, 0xa0, 0x1f, 0xf1, 0x30, 0x9d, 0x8f, 0x5f,
+	0xe7, 0xe1, 0x04, 0x0c, 0xa7, 0xb0, 0x79, 0xaa, 0x41, 0xe7, 0x7e, 0x8b, 0x08, 0xd6, 0x77, 0xcb,
+	0xf5, 0x76, 0xc2, 0xca, 0x05, 0xc6, 0x1f, 0x44, 0xaa, 0xc1, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b,
+	0x30, 0xdd, 0x09, 0x08, 0x69, 0xb3, 0x7b, 0x92, 0x38, 0xcf, 0xaa, 0x3c, 0xac, 0x17, 0xed, 0x49,
+	0x2d, 0x01, 0x3b, 0xcc, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x01, 0x8c, 0xf8, 0xbb, 0x24, 0xd8, 0x26,
+	0x4e, 0xb3, 0x72, 0xb1, 0xe0, 0xcd, 0xa8, 0x38, 0xdc, 0x6e, 0x0b, 0xdc, 0x84, 0x4b, 0x9d, 0x2c,
+	0xee, 0xed, 0x52, 0x27, 0x1b, 0x43, 0xff, 0x89, 0x05, 0x67, 0xa5, 0x91, 0xba, 0xde, 0xa1, 0xa3,
+	0xbe, 0xe8, 0x7b, 0x61, 0x14, 0xf0, 0x40, 0x54, 0xcf, 0xe6, 0x07, 0x67, 0xda, 0xc8, 0xa9, 0xa4,
+	0x4c, 0x25, 0x67, 0xf3, 0x30, 0x42, 0x9c, 0xdf, 0x22, 0xbd, 0xd9, 0x87, 0x24, 0x92, 0xcc, 0x68,
+	0x3e, 0x5c, 0xf9, 0x60, 0x69, 0xbd, 0xf2, 0x1c, 0x8f, 0xa2, 0x45, 0x37, 0x43, 0x3d, 0x09, 0xc4,
+	0x69, 0x7c, 0x74, 0x15, 0x4a, 0x7e, 0x58, 0x79, 0x9e, 0xad, 0xed, 0xb3, 0x39, 0xe3, 0x78, 0xbb,
+	0xce, 0x5d, 0xab, 0x6f, 0xd7, 0x71, 0xc9, 0x0f, 0x65, 0xba, 0x3f, 0x7a, 0x9d, 0x0d, 0x2b, 0x2f,
+	0x70, 0xc5, 0xba, 0x4c, 0xf7, 0xc7, 0x0a, 0x71, 0x0c, 0x47, 0xdb, 0x30, 0x15, 0x1a, 0x6a, 0x83,
+	0xb0, 0x72, 0x89, 0x8d, 0xd4, 0x0b, 0x79, 0x93, 0x66, 0x60, 0x6b, 0x79, 0xb8, 0x4c, 0x2a, 0x38,
+	0x49, 0x96, 0xef, 0x2e, 0x4d, 0x71, 0x11, 0x56, 0x5e, 0xec, 0xb1, 0xbb, 0x34, 0x64, 0x7d, 0x77,
+	0xe9, 0x34, 0x70, 0x82, 0x26, 0xba, 0xa3, 0x3f, 0xc8, 0xbd, 0x9c, 0xef, 0xa6, 0x9b, 0xf9, 0x14,
+	0x77, 0x22, 0xf7, 0x19, 0xee, 0x7b, 0x30, 0x2d, 0xcf, 0x12, 0xba, 0x32, 0x03, 0xb7, 0x49, 0x2a,
+	0x2f, 0xc5, 0x9b, 0xf6, 0x46, 0x02, 0x86, 0x53, 0xd8, 0xb3, 0x3f, 0x02, 0x33, 0x29, 0x39, 0xee,
+	0x28, 0xef, 0x9b, 0x66, 0x77, 0x60, 0xc2, 0xd8, 0x2b, 0x4f, 0xd7, 0xfd, 0x6d, 0x0c, 0x46, 0x95,
+	0x5b, 0x52, 0x8e, 0x39, 0x72, 0xe6, 0xb1, 0xcc, 0x91, 0x57, 0x4c, 0xef, 0xb9, 0xb3, 0x49, 0xef,
+	0xb9, 0x91, 0x9a, 0xdf, 0x34, 0x1c, 0xe6, 0x36, 0x32, 0x22, 0x60, 0xe7, 0x71, 0xf9, 0xfe, 0x1f,
+	0x74, 0x6a, 0x16, 0xbd, 0x72, 0xdf, 0x6e, 0x78, 0x03, 0x85, 0x46, 0xc2, 0xeb, 0x30, 0xe3, 0xf9,
+	0xec, 0x22, 0x42, 0x9a, 0x52, 0xca, 0x64, 0xc2, 0xe4, 0xa8, 0x1e, 0xa1, 0x31, 0x81, 0x80, 0xd3,
+	0x75, 0x68, 0x83, 0x5c, 0x1a, 0x4c, 0x5a, 0x25, 0xb9, 0xb0, 0x88, 0x05, 0x94, 0x5e, 0x80, 0xf9,
+	0xaf, 0xb0, 0x32, 0x9d, 0x7f, 0x01, 0xe6, 0x95, 0x92, 0x12, 0x67, 0x28, 0x25, 0x4e, 0x66, 0x84,
+	0xeb, 0xf8, 0xcd, 0xd5, 0x9a, 0xb8, 0xcb, 0x68, 0xb9, 0x29, 0x9a, 0xab, 0x35, 0xcc, 0x61, 0x68,
+	0x1e, 0x86, 0xd8, 0x0f, 0x19, 0xf9, 0x2a, 0x8f, 0x17, 0xad, 0xd6, 0xb4, 0x9c, 0xca, 0xac, 0x02,
+	0x16, 0x15, 0x99, 0xfd, 0x81, 0x5e, 0x00, 0x99, 0xfd, 0x61, 0xf8, 0x31, 0xed, 0x0f, 0x92, 0x00,
+	0x8e, 0x69, 0xa1, 0x87, 0x70, 0xca, 0xb8, 0x74, 0xab, 0x17, 0xae, 0x90, 0xef, 0x64, 0x93, 0x40,
+	0x5e, 0x38, 0x2f, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xa6, 0x91,
+	0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0xba, 0x48, 0xb7, 0x98, 0x26, 0x8c, 0xde, 0x86, 0x91, 0x8f,
+	0x7c, 0xee, 0x10, 0x2b, 0xee, 0x5f, 0x32, 0x3e, 0xd3, 0xc8, 0x07, 0xb7, 0xeb, 0xac, 0xfc, 0x70,
+	0xbf, 0x3a, 0x56, 0xf3, 0x9b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0x5f, 0xb2, 0x60, 0x36, 0x7d, 0xab,
+	0x57, 0x9d, 0x9e, 0xe8, 0xbf, 0xd3, 0xb6, 0x68, 0x74, 0x76, 0x39, 0x97, 0x1c, 0x2e, 0x68, 0x0a,
+	0x7d, 0x91, 0xee, 0xa7, 0xd0, 0x7d, 0xc4, 0x5f, 0xb8, 0x68, 0x0e, 0x09, 0x98, 0x95, 0x1e, 0xee,
+	0x57, 0xa7, 0x38, 0xfb, 0x77, 0x1f, 0xa9, 0x2c, 0x1a, 0xbc, 0x02, 0xfa, 0x31, 0x38, 0x15, 0xa4,
+	0xb5, 0xec, 0x44, 0xde, 0x34, 0x3e, 0xdb, 0xcf, 0x51, 0x92, 0x9c, 0x70, 0x9c, 0x45, 0x10, 0x67,
+	0xb7, 0x83, 0xfe, 0xaa, 0x05, 0xcf, 0x90, 0x7c, 0x0b, 0xae, 0xb8, 0x2a, 0xbc, 0x96, 0xd3, 0x8f,
+	0x02, 0xdb, 0x2f, 0x4b, 0x30, 0xf0, 0x4c, 0x01, 0x02, 0x2e, 0x6a, 0xd7, 0xfe, 0xc7, 0x16, 0xb3,
+	0xfa, 0x08, 0x54, 0x12, 0x76, 0x5b, 0xd1, 0x31, 0x38, 0xc7, 0x2e, 0x1b, 0xae, 0x25, 0x8f, 0xed,
+	0xdd, 0xfa, 0xdf, 0x5a, 0xcc, 0xbb, 0xf5, 0x18, 0xdf, 0xe9, 0x7e, 0x00, 0x23, 0x91, 0x68, 0x4d,
+	0x74, 0x3d, 0xcf, 0x13, 0x4f, 0x76, 0x8a, 0x79, 0xf8, 0xaa, 0x1b, 0xa6, 0x2c, 0xc5, 0x8a, 0x8c,
+	0xfd, 0x5f, 0xf1, 0x19, 0x90, 0x90, 0x63, 0x30, 0x6e, 0x2f, 0x99, 0xc6, 0xed, 0x6a, 0x8f, 0x2f,
+	0xc8, 0x31, 0x72, 0xff, 0x03, 0xb3, 0xdf, 0x4c, 0xb3, 0xfa, 0x49, 0x77, 0xab, 0xb6, 0xbf, 0x6b,
+	0x01, 0xc4, 0xe9, 0x94, 0xfa, 0x48, 0x8c, 0x7f, 0x8d, 0xde, 0x29, 0xfd, 0xc8, 0x6f, 0xf8, 0x2d,
+	0x61, 0x5c, 0x3b, 0x17, 0xdb, 0xd7, 0x79, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x54, 0x95, 0xf1,
+	0xcd, 0xcb, 0xb1, 0x5b, 0x8b, 0x11, 0xdb, 0xfc, 0x67, 0x2c, 0x38, 0x99, 0xf5, 0xe8, 0x0b, 0xbd,
+	0x02, 0x23, 0x5c, 0xc7, 0xac, 0x5c, 0xde, 0xd5, 0x6c, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xf4, 0xeb,
+	0xfa, 0x7e, 0xc4, 0x54, 0x3f, 0xb7, 0x61, 0xa2, 0x16, 0x10, 0x4d, 0xee, 0x79, 0x37, 0xce, 0x42,
+	0x36, 0xba, 0xf0, 0xca, 0x91, 0x23, 0xa9, 0xd9, 0xbf, 0x54, 0x82, 0x93, 0xdc, 0x71, 0x73, 0x7e,
+	0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x29, 0x9e, 0xea, 0x7f, 0x15, 0xc6, 0x3b, 0x9a, 0x61, 0xa0, 0x28,
+	0x6d, 0x85, 0x6e, 0x40, 0x88, 0x55, 0x99, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x26, 0x8c, 0x93, 0x5d,
+	0xb7, 0xa1, 0x1c, 0xc3, 0x4a, 0x47, 0x16, 0x1e, 0x54, 0x2b, 0xcb, 0x1a, 0x1d, 0x6c, 0x50, 0xed,
+	0xfb, 0xb9, 0x85, 0x26, 0x3a, 0x0e, 0xf4, 0x70, 0x06, 0xfb, 0x59, 0x0b, 0xce, 0xe4, 0x24, 0xb9,
+	0xa0, 0xcd, 0x3d, 0x60, 0x2e, 0xb2, 0x62, 0xd9, 0xaa, 0xe6, 0xb8, 0xe3, 0x2c, 0x16, 0x50, 0xf4,
+	0x65, 0x80, 0x4e, 0x9c, 0x1a, 0xb8, 0x47, 0x36, 0x00, 0x23, 0x2e, 0xb8, 0x16, 0xe2, 0x59, 0x65,
+	0x10, 0xd6, 0x68, 0xd9, 0x3f, 0x33, 0x00, 0x83, 0xcc, 0x07, 0x0f, 0xd5, 0x60, 0x78, 0x9b, 0x47,
+	0x20, 0x2d, 0x9c, 0x37, 0x8a, 0x2b, 0x43, 0x9a, 0xc6, 0xf3, 0xa6, 0x95, 0x62, 0x49, 0x06, 0xad,
+	0xc1, 0x09, 0x9e, 0xf6, 0xb8, 0xb5, 0x44, 0x5a, 0xce, 0x9e, 0xd4, 0xb9, 0x97, 0xd8, 0xa7, 0x2a,
+	0xdb, 0xc3, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc2, 0x64, 0xe4, 0xb6, 0x89, 0xdf, 0x8d,
+	0x4c, 0x77, 0x53, 0x75, 0x2d, 0xdc, 0x30, 0xa0, 0x38, 0x81, 0x8d, 0xde, 0x86, 0x89, 0x4e, 0xca,
+	0xba, 0x30, 0x18, 0xab, 0xe1, 0x4c, 0x8b, 0x82, 0x89, 0xcb, 0xde, 0x7d, 0x75, 0xd9, 0x2b, 0xb7,
+	0x8d, 0xed, 0x80, 0x84, 0xdb, 0x7e, 0xab, 0xc9, 0x24, 0xf3, 0x41, 0xed, 0xdd, 0x57, 0x02, 0x8e,
+	0x53, 0x35, 0x28, 0x95, 0x4d, 0xc7, 0x6d, 0x75, 0x03, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x24,
+	0xe0, 0x38, 0x55, 0xa3, 0xb7, 0xd9, 0x64, 0xf8, 0xc9, 0x98, 0x4d, 0xec, 0xbf, 0x5b, 0x02, 0x63,
+	0x6a, 0x7f, 0x88, 0xb3, 0x18, 0xbf, 0x03, 0x03, 0x5b, 0x41, 0xa7, 0x21, 0xfc, 0x4d, 0x33, 0xbf,
+	0xec, 0x3a, 0xae, 0x2d, 0xea, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0x53, 0xc2, 0xfb,
+	0x5a, 0x06, 0x29, 0x56, 0xcf, 0x2b, 0x87, 0xa5, 0x26, 0xa2, 0x20, 0x9c, 0xbf, 0x78, 0x23, 0xa6,
+	0xfc, 0xb7, 0x35, 0x53, 0xb8, 0xd0, 0x43, 0x48, 0x2a, 0xe8, 0x2a, 0x8c, 0x89, 0xc4, 0xb2, 0xec,
+	0x15, 0x20, 0xdf, 0x4c, 0xcc, 0x95, 0x74, 0x29, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0x97, 0x4b, 0x70,
+	0x22, 0xe3, 0x19, 0x37, 0x3f, 0x46, 0xb6, 0xdc, 0x30, 0x0a, 0xf6, 0x92, 0x87, 0x13, 0x16, 0xe5,
+	0x58, 0x61, 0x50, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0x49, 0x0a, 0xe8, 0xd1, 0x0e,
+	0x27, 0x7a, 0x6c, 0x77, 0x43, 0x22, 0x33, 0x87, 0xa8, 0x63, 0x9b, 0xb9, 0x64, 0x30, 0x08, 0xbd,
+	0x9a, 0x6e, 0x29, 0x3f, 0x03, 0xed, 0x6a, 0xca, 0x3d, 0x0d, 0x38, 0x8c, 0x76, 0x2e, 0x22, 0x9e,
+	0xe3, 0x45, 0xe2, 0x02, 0x1b, 0x47, 0x94, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xef, 0x95, 0xe1, 0x6c,
+	0x6e, 0x60, 0x07, 0xda, 0xf5, 0xb6, 0xef, 0xb9, 0x91, 0xaf, 0x7c, 0x74, 0x79, 0x14, 0x79, 0xd2,
+	0xd9, 0x5e, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x4b, 0x30, 0xc8, 0x2c, 0x12, 0xc9, 0xa4, 0x92, 0x78,
+	0x61, 0x89, 0xc7, 0xd8, 0xe5, 0x60, 0xed, 0x54, 0x2f, 0x17, 0x9e, 0xea, 0xcf, 0x51, 0x09, 0xc6,
+	0x6f, 0x25, 0x0f, 0x14, 0xda, 0x5d, 0xdf, 0x6f, 0x61, 0x06, 0x44, 0x2f, 0x88, 0xf1, 0x4a, 0x38,
+	0xa5, 0x62, 0xa7, 0xe9, 0x87, 0xda, 0xa0, 0x71, 0x07, 0xf8, 0xc0, 0xf5, 0xb6, 0x92, 0xce, 0xca,
+	0x37, 0x79, 0x31, 0x96, 0x70, 0xba, 0x97, 0xe2, 0xdc, 0xf8, 0xc3, 0xf9, 0x7b, 0x49, 0x65, 0xc0,
+	0xef, 0x99, 0x16, 0x5f, 0x5f, 0x01, 0x23, 0x3d, 0xc5, 0x93, 0x9f, 0x2a, 0xc3, 0x14, 0x5e, 0x58,
+	0xfa, 0x74, 0x22, 0xee, 0xa4, 0x27, 0xa2, 0x7f, 0xb3, 0xd9, 0x93, 0x9a, 0x8d, 0x7f, 0x68, 0xc1,
+	0x14, 0x4b, 0x6f, 0x2b, 0xa2, 0x32, 0xb9, 0xbe, 0x77, 0x0c, 0x57, 0x81, 0xe7, 0x60, 0x30, 0xa0,
+	0x8d, 0x8a, 0x19, 0x54, 0x7b, 0x9c, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x07, 0x03, 0xac, 0x0b, 0x74,
+	0xf2, 0xc6, 0x39, 0x0b, 0x5e, 0x72, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x1f, 0x16, 0x93, 0x4e, 0xcb,
+	0xe5, 0x9d, 0x8e, 0xfd, 0x45, 0x3e, 0x19, 0x21, 0x9f, 0x32, 0xbb, 0xf6, 0xf1, 0xe2, 0xc3, 0x66,
+	0x93, 0x2c, 0xbe, 0x66, 0xff, 0x71, 0x09, 0x2e, 0x64, 0xd6, 0xeb, 0x3b, 0x3e, 0x6c, 0x71, 0xed,
+	0xa7, 0x99, 0x0c, 0xb3, 0x7c, 0x8c, 0x4f, 0x41, 0x06, 0xfa, 0x95, 0xfe, 0x07, 0xfb, 0x08, 0xdb,
+	0x9a, 0x39, 0x64, 0x9f, 0x90, 0xb0, 0xad, 0x99, 0x7d, 0xcb, 0x51, 0x13, 0xfc, 0x69, 0x29, 0xe7,
+	0x5b, 0x98, 0xc2, 0xe0, 0x32, 0xe5, 0x33, 0x0c, 0x18, 0xca, 0x4b, 0x38, 0xe7, 0x31, 0xbc, 0x0c,
+	0x2b, 0x28, 0x9a, 0x87, 0xa9, 0xb6, 0xeb, 0x51, 0xe6, 0xb3, 0x67, 0x8a, 0xe2, 0xca, 0x90, 0xb4,
+	0x66, 0x82, 0x71, 0x12, 0x1f, 0xb9, 0x5a, 0x48, 0x57, 0xfe, 0x75, 0x6f, 0x1f, 0x69, 0xd7, 0xcd,
+	0x99, 0xbe, 0x34, 0x6a, 0x14, 0x33, 0xc2, 0xbb, 0xae, 0x69, 0x7a, 0xa2, 0x72, 0xff, 0x7a, 0xa2,
+	0xf1, 0x6c, 0x1d, 0xd1, 0xec, 0xdb, 0x30, 0xf1, 0xd8, 0xf6, 0x1f, 0xfb, 0xfb, 0x65, 0x78, 0xa6,
+	0x60, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, 0xc6, 0xeb, 0x53, 0xf3, 0x50, 0x83, 0x93, 0x9b, 0xdd,
+	0x56, 0x6b, 0x8f, 0x3d, 0x6c, 0x25, 0x4d, 0x89, 0x21, 0x64, 0x4a, 0xf5, 0xf4, 0x6d, 0x25, 0x03,
+	0x07, 0x67, 0xd6, 0xa4, 0x57, 0x2c, 0x7a, 0x92, 0xec, 0x29, 0x52, 0x89, 0x2b, 0x16, 0xd6, 0x81,
+	0xd8, 0xc4, 0x45, 0xd7, 0x61, 0xc6, 0xd9, 0x75, 0x5c, 0x9e, 0x4c, 0x48, 0x12, 0xe0, 0x77, 0x2c,
+	0xa5, 0x23, 0x9f, 0x4f, 0x22, 0xe0, 0x74, 0x9d, 0x1c, 0x53, 0x55, 0xf9, 0xb1, 0x4c, 0x55, 0x66,
+	0x70, 0xd1, 0xa1, 0xfc, 0xe0, 0xa2, 0xc5, 0x7c, 0xb1, 0x67, 0x1e, 0xd6, 0x0f, 0x61, 0xe2, 0xa8,
+	0x3e, 0xf1, 0x2f, 0xc1, 0xb0, 0x78, 0xc3, 0x93, 0x7c, 0xaf, 0x29, 0xf3, 0xff, 0x4b, 0xb8, 0xfd,
+	0xbf, 0x5a, 0xa0, 0x74, 0xdc, 0x66, 0x1e, 0x81, 0xb7, 0x99, 0x83, 0x3f, 0xd7, 0xce, 0x6b, 0x6f,
+	0x45, 0x4f, 0x69, 0x0e, 0xfe, 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0xb9, 0x85, 0x71, 0xc4, 0x1a, 0xe3,
+	0x02, 0x21, 0x6c, 0xab, 0x0a, 0x03, 0x7d, 0x05, 0x86, 0x9b, 0xee, 0xae, 0x1b, 0x0a, 0x3d, 0xda,
+	0x91, 0x6d, 0x93, 0xf1, 0xf7, 0x2d, 0x71, 0x32, 0x58, 0xd2, 0xb3, 0xff, 0x8a, 0x05, 0xca, 0x28,
+	0x7c, 0x83, 0x38, 0xad, 0x68, 0x1b, 0xbd, 0x07, 0x20, 0x29, 0x28, 0xdd, 0x9b, 0x74, 0x55, 0x03,
+	0xac, 0x20, 0x87, 0xc6, 0x3f, 0xac, 0xd5, 0x41, 0xef, 0xc2, 0xd0, 0x36, 0xa3, 0x25, 0xbe, 0xed,
+	0x92, 0x32, 0xc1, 0xb1, 0xd2, 0xc3, 0xfd, 0xea, 0x49, 0xb3, 0x4d, 0x79, 0x8a, 0xf1, 0x5a, 0xf6,
+	0x4f, 0x95, 0xe2, 0x39, 0xfd, 0xa0, 0xeb, 0x47, 0xce, 0x31, 0x48, 0x22, 0xd7, 0x0d, 0x49, 0xe4,
+	0x85, 0x22, 0xab, 0x37, 0xeb, 0x52, 0xae, 0x04, 0x72, 0x3b, 0x21, 0x81, 0xbc, 0xd8, 0x9b, 0x54,
+	0xb1, 0xe4, 0xf1, 0x5f, 0x5b, 0x30, 0x63, 0xe0, 0x1f, 0xc3, 0x01, 0xb8, 0x62, 0x1e, 0x80, 0xcf,
+	0xf6, 0xfc, 0x86, 0x9c, 0x83, 0xef, 0x27, 0xca, 0x89, 0xbe, 0xb3, 0x03, 0xef, 0x23, 0x18, 0xd8,
+	0x76, 0x82, 0xa6, 0xb8, 0xd7, 0x5f, 0xe9, 0x6b, 0xac, 0xe7, 0x6e, 0x38, 0x81, 0x70, 0x73, 0x79,
+	0x45, 0x8e, 0x3a, 0x2d, 0xea, 0xe9, 0xe2, 0xc2, 0x9a, 0x42, 0xd7, 0x60, 0x28, 0x6c, 0xf8, 0x1d,
+	0xf5, 0x24, 0xf4, 0x22, 0x1b, 0x68, 0x56, 0x72, 0xb8, 0x5f, 0x45, 0x66, 0x73, 0xb4, 0x18, 0x0b,
+	0x7c, 0xf4, 0x55, 0x98, 0x60, 0xbf, 0x94, 0xcf, 0x69, 0x39, 0x5f, 0x03, 0x53, 0xd7, 0x11, 0xb9,
+	0x43, 0xb6, 0x51, 0x84, 0x4d, 0x52, 0xb3, 0x5b, 0x30, 0xaa, 0x3e, 0xeb, 0xa9, 0x7a, 0x24, 0xfc,
+	0x8b, 0x32, 0x9c, 0xc8, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x6a, 0x9f, 0x4b, 0xf5, 0x63, 0xce,
+	0x45, 0xc8, 0x2e, 0x80, 0x4d, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x4e, 0x48, 0x92, 0x8d, 0xd2, 0xa2,
+	0xde, 0x8d, 0xd2, 0xc6, 0x8e, 0x6d, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x4f, 0x75, 0x4e, 0x7f, 0x6b,
+	0x00, 0x4e, 0x66, 0x39, 0xe2, 0xa0, 0x1f, 0x85, 0x21, 0xf6, 0x9c, 0xaf, 0xf0, 0xfd, 0x6b, 0x56,
+	0xcd, 0x39, 0xf6, 0x22, 0x50, 0x84, 0xa2, 0x9e, 0x93, 0xec, 0x88, 0x17, 0xf6, 0x1c, 0x66, 0xd1,
+	0x26, 0x0b, 0x11, 0x27, 0x4e, 0x4f, 0xc9, 0x3e, 0x3e, 0xdf, 0x77, 0x07, 0xc4, 0xf9, 0x1b, 0x26,
+	0xfc, 0xd9, 0x64, 0x71, 0x6f, 0x7f, 0x36, 0xd9, 0x32, 0x5a, 0x85, 0xa1, 0x06, 0x77, 0x94, 0x2a,
+	0xf7, 0x66, 0x61, 0xdc, 0x4b, 0x4a, 0x31, 0x60, 0xe1, 0x1d, 0x25, 0x08, 0xcc, 0xba, 0x30, 0xa6,
+	0x0d, 0xcc, 0x53, 0x5d, 0x3c, 0x3b, 0xf4, 0xe0, 0xd3, 0x86, 0xe0, 0xa9, 0x2e, 0xa0, 0xbf, 0xae,
+	0x9d, 0xfd, 0x82, 0x1f, 0x7c, 0xce, 0x90, 0x9d, 0xce, 0x25, 0x1e, 0x59, 0x26, 0xf6, 0x15, 0x93,
+	0xa5, 0xea, 0x66, 0x0e, 0x87, 0xdc, 0x44, 0x74, 0xe6, 0x81, 0x5f, 0x9c, 0xb7, 0xc1, 0xfe, 0x59,
+	0x0b, 0x12, 0xcf, 0xe0, 0x94, 0xba, 0xd3, 0xca, 0x55, 0x77, 0x5e, 0x84, 0x81, 0xc0, 0x6f, 0x49,
+	0x79, 0x4a, 0x61, 0x60, 0xbf, 0x45, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x56, 0x62, 0x8d, 0xeb, 0x17,
+	0x74, 0x71, 0xf5, 0x7e, 0x0e, 0x06, 0x5b, 0x64, 0x97, 0xb4, 0x92, 0xf9, 0x98, 0x6f, 0xd1, 0x42,
+	0xcc, 0x61, 0xf6, 0x3f, 0x1c, 0x80, 0xf3, 0x85, 0x91, 0x24, 0xa9, 0x80, 0xb9, 0xe5, 0x44, 0xe4,
+	0x81, 0xb3, 0x97, 0xcc, 0x43, 0x7a, 0x9d, 0x17, 0x63, 0x09, 0x67, 0xef, 0xee, 0x79, 0x6e, 0xad,
+	0x84, 0x72, 0x58, 0xa4, 0xd4, 0x12, 0x50, 0x53, 0xd9, 0x58, 0x7e, 0x12, 0xca, 0xc6, 0xd7, 0x00,
+	0xc2, 0xb0, 0xc5, 0xbd, 0x5d, 0x9b, 0xe2, 0x41, 0x7f, 0x1c, 0xe9, 0xa4, 0x7e, 0x4b, 0x40, 0xb0,
+	0x86, 0x85, 0x96, 0x60, 0xba, 0x13, 0xf8, 0x11, 0xd7, 0xb5, 0x2f, 0x71, 0x87, 0xf0, 0x41, 0x33,
+	0x88, 0x5f, 0x2d, 0x01, 0xc7, 0xa9, 0x1a, 0xe8, 0x4d, 0x18, 0x13, 0x81, 0xfd, 0x6a, 0xbe, 0xdf,
+	0x12, 0xea, 0x3d, 0xe5, 0x23, 0x5d, 0x8f, 0x41, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x05, 0xfe, 0x70,
+	0x66, 0x35, 0xae, 0xc4, 0xd7, 0xf0, 0x12, 0x49, 0x40, 0x46, 0xfa, 0x4a, 0x02, 0x12, 0x2b, 0x3c,
+	0x47, 0xfb, 0xb6, 0x27, 0x43, 0x4f, 0x15, 0xe1, 0xaf, 0x0c, 0xc0, 0x09, 0xb1, 0x70, 0x9e, 0xf6,
+	0x72, 0xb9, 0x93, 0x5e, 0x2e, 0x4f, 0x42, 0x25, 0xfa, 0xe9, 0x9a, 0x39, 0xee, 0x35, 0xf3, 0xd3,
+	0x16, 0x98, 0x32, 0x24, 0xfa, 0x8f, 0x72, 0x13, 0x39, 0xbf, 0x99, 0x2b, 0x93, 0xc6, 0x19, 0x02,
+	0x3e, 0x5e, 0x4a, 0x67, 0xfb, 0x7f, 0xb6, 0xe0, 0xd9, 0x9e, 0x14, 0xd1, 0x32, 0x8c, 0x32, 0x41,
+	0x57, 0xbb, 0x17, 0xbf, 0xa8, 0x1e, 0x8c, 0x48, 0x40, 0x8e, 0xdc, 0x1d, 0xd7, 0x44, 0xcb, 0xa9,
+	0x8c, 0xd9, 0x2f, 0x65, 0x64, 0xcc, 0x3e, 0x65, 0x0c, 0xcf, 0x63, 0xa6, 0xcc, 0xfe, 0x49, 0x7a,
+	0xe2, 0x98, 0xaf, 0x4e, 0x3f, 0x6f, 0xa8, 0x73, 0xed, 0x84, 0x3a, 0x17, 0x99, 0xd8, 0xda, 0x19,
+	0xf2, 0x1e, 0x4c, 0xb3, 0x88, 0xbf, 0xec, 0xf9, 0x92, 0x78, 0xae, 0x5a, 0x8a, 0xbd, 0x9d, 0x6f,
+	0x25, 0x60, 0x38, 0x85, 0x6d, 0xff, 0x9b, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x17, 0xdf, 0x97,
+	0x61, 0xd4, 0x6d, 0xb7, 0xbb, 0x3c, 0x09, 0xf2, 0x60, 0xec, 0xf0, 0xbe, 0x2a, 0x0b, 0x71, 0x0c,
+	0x47, 0x2b, 0xc2, 0x92, 0x50, 0x90, 0x54, 0x80, 0x77, 0x7c, 0x6e, 0xc9, 0x89, 0x1c, 0x2e, 0xc5,
+	0xa9, 0x73, 0x36, 0xb6, 0x39, 0xa0, 0x6f, 0x00, 0x84, 0x51, 0xe0, 0x7a, 0x5b, 0xb4, 0x4c, 0x64,
+	0x9e, 0xf9, 0x6c, 0x01, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x63, 0x9e, 0xa3, 0x00, 0x58, 0xa3, 0x88,
+	0xe6, 0x8c, 0x93, 0x7e, 0x36, 0x31, 0x77, 0xc0, 0xa9, 0xc6, 0x73, 0x36, 0xfb, 0x05, 0x18, 0x55,
+	0xc4, 0x7b, 0xe9, 0x15, 0xc7, 0x75, 0x81, 0xed, 0x4b, 0x30, 0x95, 0xe8, 0xdb, 0x91, 0xd4, 0x92,
+	0xbf, 0x6e, 0xc1, 0x14, 0xef, 0xcc, 0xb2, 0xb7, 0x2b, 0x4e, 0x83, 0x47, 0x70, 0xb2, 0x95, 0xc1,
+	0x95, 0xc5, 0xf4, 0xf7, 0xcf, 0xc5, 0x95, 0x1a, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9,
+	0x8e, 0xa3, 0x5c, 0xd7, 0x69, 0x89, 0x98, 0x2b, 0xe3, 0x7c, 0xb7, 0xf1, 0x32, 0xac, 0xa0, 0xf6,
+	0x1f, 0x58, 0x30, 0xc3, 0x7b, 0x7e, 0x93, 0xec, 0x29, 0xde, 0xf4, 0x83, 0xec, 0xbb, 0x48, 0xbf,
+	0x5f, 0xca, 0x49, 0xbf, 0xaf, 0x7f, 0x5a, 0xb9, 0xf0, 0xd3, 0x7e, 0xc9, 0x02, 0xb1, 0x42, 0x8e,
+	0x41, 0xd3, 0xf2, 0x23, 0xa6, 0xa6, 0x65, 0x36, 0x7f, 0x13, 0xe4, 0xa8, 0x58, 0xfe, 0xbd, 0x05,
+	0xd3, 0x1c, 0x41, 0x8b, 0x62, 0xf7, 0x83, 0x9c, 0x87, 0x05, 0xf3, 0x8b, 0x32, 0xdd, 0x5a, 0x6f,
+	0x92, 0xbd, 0x0d, 0xbf, 0xe6, 0x44, 0xdb, 0xd9, 0x1f, 0x65, 0x4c, 0xd6, 0x40, 0xe1, 0x64, 0x35,
+	0xe5, 0x06, 0x32, 0x12, 0xad, 0xf6, 0x50, 0x00, 0x1f, 0x35, 0xd1, 0xaa, 0xfd, 0x47, 0x16, 0x20,
+	0xde, 0x8c, 0x21, 0xb8, 0x51, 0x71, 0x88, 0x95, 0x66, 0x06, 0x0b, 0x54, 0x10, 0xac, 0x61, 0x3d,
+	0x91, 0xe1, 0x49, 0xb8, 0xb2, 0x94, 0x7b, 0xbb, 0xb2, 0x1c, 0x61, 0x44, 0x7f, 0x69, 0x18, 0x92,
+	0x0f, 0x56, 0xd1, 0x5d, 0x18, 0x6f, 0x38, 0x1d, 0xe7, 0xbe, 0xdb, 0x72, 0x23, 0x97, 0x84, 0x45,
+	0x7e, 0x6e, 0x8b, 0x1a, 0x9e, 0x70, 0x3e, 0xd0, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, 0x74, 0x02,
+	0x77, 0xd7, 0x6d, 0x91, 0x2d, 0xa6, 0x10, 0x62, 0x51, 0x9e, 0xb8, 0xd3, 0x9d, 0x2c, 0xc5, 0x1a,
+	0x46, 0x46, 0x70, 0x95, 0xf2, 0x53, 0x0e, 0xae, 0x02, 0xc7, 0x16, 0x5c, 0x65, 0xe0, 0x48, 0xc1,
+	0x55, 0x46, 0x8e, 0x1c, 0x5c, 0x65, 0xb0, 0xaf, 0xe0, 0x2a, 0x18, 0x4e, 0x4b, 0xd9, 0x93, 0xfe,
+	0x5f, 0x71, 0x5b, 0x44, 0x5c, 0x38, 0x78, 0x68, 0xaa, 0xd9, 0x83, 0xfd, 0xea, 0x69, 0x9c, 0x89,
+	0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x43, 0xc5, 0x69, 0xb5, 0xfc, 0x07, 0x6a, 0x52, 0x97, 0xc3, 0x86,
+	0xd3, 0x8a, 0xc3, 0x32, 0x8e, 0x2c, 0x9c, 0x3b, 0xd8, 0xaf, 0x56, 0xe6, 0x73, 0x70, 0x70, 0x6e,
+	0x6d, 0xf4, 0x0e, 0x8c, 0x76, 0x02, 0xbf, 0xb1, 0xa6, 0xbd, 0xaa, 0xbf, 0x40, 0x07, 0xb0, 0x26,
+	0x0b, 0x0f, 0xf7, 0xab, 0x13, 0xea, 0x0f, 0x3b, 0xf0, 0xe3, 0x0a, 0x19, 0x71, 0x4b, 0xc6, 0x9e,
+	0x76, 0xdc, 0x92, 0xf1, 0x27, 0x1c, 0xb7, 0xc4, 0xde, 0x81, 0x13, 0x75, 0x12, 0xb8, 0x4e, 0xcb,
+	0x7d, 0x44, 0x65, 0x72, 0xc9, 0x03, 0x37, 0x60, 0x34, 0x48, 0x70, 0xfd, 0xbe, 0x92, 0x09, 0x68,
+	0x7a, 0x19, 0xc9, 0xe5, 0x63, 0x42, 0xf6, 0xff, 0x6b, 0xc1, 0xb0, 0x78, 0x04, 0x7b, 0x0c, 0x92,
+	0xe9, 0xbc, 0x61, 0x92, 0xa9, 0x66, 0x4f, 0x0a, 0xeb, 0x4c, 0xae, 0x31, 0x66, 0x35, 0x61, 0x8c,
+	0x79, 0xb6, 0x88, 0x48, 0xb1, 0x19, 0xe6, 0x3f, 0x2b, 0xd3, 0x1b, 0x82, 0x11, 0x8e, 0xe1, 0xe9,
+	0x0f, 0xc1, 0x3a, 0x0c, 0x87, 0x22, 0x1c, 0x40, 0x29, 0xff, 0x8d, 0x51, 0x72, 0x12, 0x63, 0x1f,
+	0x48, 0x11, 0x00, 0x40, 0x12, 0xc9, 0x8c, 0x33, 0x50, 0x7e, 0x8a, 0x71, 0x06, 0x7a, 0x05, 0xac,
+	0x18, 0x78, 0x12, 0x01, 0x2b, 0xec, 0xdf, 0x60, 0xa7, 0xb3, 0x5e, 0x7e, 0x0c, 0x82, 0xdb, 0x75,
+	0xf3, 0x1c, 0xb7, 0x0b, 0x56, 0x96, 0xe8, 0x54, 0x8e, 0x00, 0xf7, 0x6b, 0x16, 0x9c, 0xcf, 0xf8,
+	0x2a, 0x4d, 0x9a, 0x7b, 0x05, 0x46, 0x9c, 0x6e, 0xd3, 0x55, 0x7b, 0x59, 0xb3, 0x16, 0xcf, 0x8b,
+	0x72, 0xac, 0x30, 0xd0, 0x22, 0xcc, 0x90, 0x54, 0x7c, 0x61, 0x1e, 0xb9, 0x8b, 0xbd, 0x9c, 0x4e,
+	0x07, 0x17, 0x4e, 0xe3, 0xab, 0xa0, 0x77, 0xe5, 0xdc, 0xa0, 0x77, 0x7f, 0xcf, 0x82, 0x31, 0xf5,
+	0x20, 0xfe, 0xa9, 0x8f, 0xf6, 0x7b, 0xe6, 0x68, 0x3f, 0x53, 0x30, 0xda, 0x39, 0xc3, 0xfc, 0x7b,
+	0x25, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x7d, 0x48, 0x89, 0x8f, 0xff, 0xec, 0xe5, 0x2a, 0x8c, 0x39,
+	0x9d, 0x8e, 0x04, 0x48, 0xff, 0x45, 0x96, 0x1a, 0x26, 0x2e, 0xc6, 0x3a, 0x8e, 0x7a, 0x85, 0x53,
+	0xce, 0x7d, 0x85, 0xd3, 0x04, 0x88, 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0xb8, 0x5b, 0xe7, 0xf3,
+	0x9b, 0x6e, 0xe4, 0xb6, 0xe6, 0x5c, 0x2f, 0x0a, 0xa3, 0x60, 0x6e, 0xd5, 0x8b, 0x6e, 0x07, 0xfc,
+	0x9a, 0xaa, 0x85, 0x96, 0x54, 0xb4, 0xb0, 0x46, 0x57, 0x06, 0x7f, 0x61, 0x6d, 0x0c, 0x9a, 0x8e,
+	0x30, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0b, 0xec, 0xf4, 0x61, 0x63, 0x7a, 0xb4, 0x90, 0x89,
+	0x7f, 0x3c, 0xae, 0x66, 0x83, 0x99, 0x84, 0x97, 0xf4, 0xc0, 0x8c, 0xc5, 0xcc, 0x9e, 0x36, 0xac,
+	0xbf, 0xb3, 0x8d, 0xa3, 0x37, 0xa2, 0xaf, 0xa5, 0x9c, 0x9b, 0x5e, 0xed, 0x71, 0x6a, 0x1c, 0xc1,
+	0x9d, 0x89, 0xe5, 0x89, 0x64, 0x59, 0xf4, 0x56, 0x6b, 0x62, 0x5f, 0x68, 0x79, 0x22, 0x05, 0x00,
+	0xc7, 0x38, 0x54, 0x60, 0x53, 0x7f, 0xc2, 0x0a, 0x8a, 0xd3, 0x09, 0x28, 0xec, 0x10, 0x6b, 0x18,
+	0xe8, 0x8a, 0x50, 0x5a, 0x70, 0xdb, 0xc3, 0x33, 0x09, 0xa5, 0x85, 0x1c, 0x2e, 0x4d, 0xd3, 0x74,
+	0x15, 0xc6, 0xc8, 0xc3, 0x88, 0x04, 0x9e, 0xd3, 0xa2, 0x2d, 0x0c, 0xc6, 0xc1, 0x91, 0x97, 0xe3,
+	0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0a, 0xb9, 0x2e, 0x4f, 0x25, 0xb1, 0xe1, 0x3a, 0xd1, 0xcf,
+	0xaa, 0x50, 0x04, 0x26, 0xf8, 0x90, 0x15, 0x71, 0xee, 0x24, 0x03, 0xb4, 0x24, 0x49, 0xa0, 0x77,
+	0x61, 0xb2, 0xe5, 0x3b, 0xcd, 0x05, 0xa7, 0xe5, 0x78, 0x0d, 0x36, 0x3e, 0x23, 0x46, 0x94, 0xce,
+	0xc9, 0x5b, 0x06, 0x14, 0x27, 0xb0, 0xa9, 0x80, 0xa8, 0x97, 0x88, 0xc4, 0x4b, 0x8e, 0xb7, 0x45,
+	0xc2, 0xca, 0x28, 0xfb, 0x2a, 0x26, 0x20, 0xde, 0xca, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x35, 0x18,
+	0x97, 0x9f, 0xaf, 0xc5, 0x33, 0x8a, 0x1f, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x4e, 0xc9,
+	0xff, 0x1b, 0x81, 0xb3, 0xb9, 0xe9, 0x36, 0x44, 0x90, 0x0f, 0xfe, 0x28, 0xfd, 0x4b, 0xf2, 0x05,
+	0xec, 0x72, 0x16, 0xd2, 0xe1, 0x7e, 0xf5, 0x9c, 0x18, 0xb5, 0x4c, 0x38, 0xce, 0xa6, 0x8d, 0xd6,
+	0xe0, 0x04, 0xf7, 0x81, 0x59, 0xdc, 0x26, 0x8d, 0x1d, 0xb9, 0xe1, 0x98, 0xd4, 0xa8, 0x3d, 0xfc,
+	0xb9, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x87, 0x50, 0xe9, 0x74, 0xef, 0xb7, 0xdc, 0x70, 0x7b,
+	0xdd, 0x8f, 0x98, 0x0b, 0xd9, 0x7c, 0xb3, 0x19, 0x90, 0x90, 0xbf, 0x59, 0x66, 0x47, 0xaf, 0x8c,
+	0x41, 0x55, 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0x23, 0x38, 0x95, 0x58, 0x08, 0x22, 0x98, 0xcc,
+	0x64, 0x7e, 0x0a, 0xbb, 0x7a, 0x56, 0x05, 0x11, 0x97, 0x29, 0x0b, 0x84, 0xb3, 0x9b, 0x40, 0x6f,
+	0x01, 0xb8, 0x9d, 0x15, 0xa7, 0xed, 0xb6, 0xe8, 0x75, 0xf4, 0x04, 0x5b, 0x23, 0xf4, 0x6a, 0x02,
+	0xab, 0x35, 0x59, 0x4a, 0x79, 0xb3, 0xf8, 0xb7, 0x87, 0x35, 0x6c, 0x74, 0x0b, 0x26, 0xc5, 0xbf,
+	0x3d, 0x31, 0xa5, 0x33, 0x2a, 0xdb, 0xf1, 0xa4, 0xac, 0xa1, 0xe6, 0x31, 0x51, 0x82, 0x13, 0x75,
+	0xd1, 0x16, 0x9c, 0x97, 0xa9, 0x96, 0xf5, 0xf5, 0x29, 0xe7, 0x20, 0x64, 0x79, 0xe3, 0x46, 0xf8,
+	0x9b, 0xa2, 0xf9, 0x22, 0x44, 0x5c, 0x4c, 0x87, 0x9e, 0xeb, 0xfa, 0x32, 0xe7, 0x2f, 0xd9, 0x4f,
+	0xc5, 0xb1, 0x4e, 0x6f, 0x25, 0x81, 0x38, 0x8d, 0x8f, 0x7c, 0x38, 0xe5, 0x7a, 0x59, 0xab, 0xfa,
+	0x34, 0x23, 0xf4, 0x45, 0xfe, 0x88, 0xbf, 0x78, 0x45, 0x67, 0xc2, 0x71, 0x36, 0x5d, 0xb4, 0x0a,
+	0x27, 0x22, 0x5e, 0xb0, 0xe4, 0x86, 0x3c, 0x2d, 0x15, 0xbd, 0xf6, 0x9d, 0x61, 0xcd, 0x9d, 0xa1,
+	0xab, 0x79, 0x23, 0x0d, 0xc6, 0x59, 0x75, 0x3e, 0x9e, 0x03, 0xe8, 0xef, 0x5b, 0xb4, 0xb6, 0x26,
+	0xe8, 0xa3, 0x6f, 0xc2, 0xb8, 0x3e, 0x3e, 0x42, 0x68, 0xb9, 0x94, 0x2d, 0x07, 0x6b, 0xec, 0x85,
+	0x5f, 0x13, 0x14, 0x0b, 0xd1, 0x61, 0xd8, 0xa0, 0x88, 0x1a, 0x19, 0xc1, 0x37, 0xae, 0xf4, 0x27,
+	0x14, 0xf5, 0xef, 0xff, 0x48, 0x20, 0x7b, 0xe7, 0xa0, 0x5b, 0x30, 0xd2, 0x68, 0xb9, 0xc4, 0x8b,
+	0x56, 0x6b, 0x45, 0x21, 0x68, 0x17, 0x05, 0x8e, 0xd8, 0x8a, 0x22, 0x9b, 0x1c, 0x2f, 0xc3, 0x8a,
+	0x82, 0x7d, 0x0d, 0xc6, 0xea, 0x2d, 0x42, 0x3a, 0xfc, 0x1d, 0x17, 0x7a, 0x89, 0x5d, 0x4c, 0x98,
+	0x68, 0x69, 0x31, 0xd1, 0x52, 0xbf, 0x73, 0x30, 0xa1, 0x52, 0xc2, 0xed, 0xdf, 0x2e, 0x41, 0xb5,
+	0x47, 0x52, 0xc3, 0x84, 0xbd, 0xcd, 0xea, 0xcb, 0xde, 0x36, 0x0f, 0x53, 0xf1, 0x3f, 0x5d, 0x95,
+	0xa7, 0x9c, 0xa1, 0xef, 0x9a, 0x60, 0x9c, 0xc4, 0xef, 0xfb, 0x5d, 0x8b, 0x6e, 0xb2, 0x1b, 0xe8,
+	0xf9, 0x32, 0xcb, 0x30, 0xd5, 0x0f, 0xf6, 0x7f, 0xf7, 0xce, 0x35, 0xbb, 0xda, 0xbf, 0x51, 0x82,
+	0x53, 0x6a, 0x08, 0x7f, 0x78, 0x07, 0xee, 0x4e, 0x7a, 0xe0, 0x9e, 0x80, 0xd1, 0xda, 0xbe, 0x0d,
+	0x43, 0x3c, 0x2e, 0x6e, 0x1f, 0x32, 0xff, 0x73, 0x66, 0x1e, 0x06, 0x25, 0x66, 0x1a, 0xb9, 0x18,
+	0xfe, 0x92, 0x05, 0x53, 0x89, 0x07, 0x92, 0x08, 0x6b, 0xaf, 0xe8, 0x1f, 0x47, 0x2e, 0xcf, 0x92,
+	0xf8, 0x2f, 0xc2, 0xc0, 0xb6, 0xaf, 0x9c, 0x94, 0x15, 0xc6, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8,
+	0xff, 0xd2, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0x69, 0xfd, 0xb0, 0x72, 0xac, 0x1f, 0xfd, 0x7c,
+	0x17, 0x7a, 0x13, 0x86, 0xc8, 0xe6, 0x26, 0x69, 0x44, 0x62, 0x56, 0x65, 0x94, 0x8f, 0xa1, 0x65,
+	0x56, 0x4a, 0x85, 0x50, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa3, 0x91, 0xdb, 0x26,
+	0xf3, 0xcd, 0xa6, 0xf0, 0x09, 0x78, 0x8c, 0xd0, 0x34, 0x1b, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0xdf,
+	0x2b, 0x01, 0xc4, 0x71, 0xf8, 0x7a, 0x7d, 0xe2, 0x42, 0xca, 0x5a, 0x7c, 0x29, 0xc3, 0x5a, 0x8c,
+	0x62, 0x82, 0x19, 0xa6, 0x62, 0x35, 0x4c, 0xe5, 0xbe, 0x86, 0x69, 0xe0, 0x28, 0xc3, 0xb4, 0x08,
+	0x33, 0x71, 0x1c, 0x41, 0x33, 0x8c, 0x2a, 0x3b, 0xbf, 0x37, 0x92, 0x40, 0x9c, 0xc6, 0xb7, 0x09,
+	0x5c, 0x54, 0xe1, 0xd4, 0xc4, 0x59, 0xc8, 0x9e, 0x12, 0xe8, 0xd6, 0xf7, 0x1e, 0xe3, 0x14, 0x9b,
+	0xc3, 0x4b, 0xb9, 0xe6, 0xf0, 0xbf, 0x69, 0xc1, 0xc9, 0x64, 0x3b, 0xec, 0xdd, 0xfd, 0x77, 0x2d,
+	0x38, 0x15, 0xe7, 0xf4, 0x4a, 0xbb, 0x20, 0xbc, 0x51, 0x18, 0x22, 0x2e, 0xa7, 0xc7, 0x71, 0x38,
+	0x99, 0xb5, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xff, 0x19, 0x80, 0x4a, 0x5e, 0x6c, 0x39, 0xf6,
+	0xd2, 0xc8, 0x79, 0x58, 0xdf, 0x21, 0x0f, 0xc4, 0x7b, 0x8e, 0xf8, 0xa5, 0x11, 0x2f, 0xc6, 0x12,
+	0x9e, 0x4c, 0xe3, 0x56, 0xea, 0x33, 0x8d, 0xdb, 0x36, 0xcc, 0x3c, 0xd8, 0x26, 0xde, 0x1d, 0x2f,
+	0x74, 0x22, 0x37, 0xdc, 0x74, 0x99, 0x01, 0x9d, 0xaf, 0x9b, 0xb7, 0xe4, 0xab, 0x8b, 0x7b, 0x49,
+	0x84, 0xc3, 0xfd, 0xea, 0x79, 0xa3, 0x20, 0xee, 0x32, 0x67, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x05,
+	0x6f, 0xe0, 0x29, 0x67, 0xc1, 0x6b, 0xbb, 0xc2, 0xed, 0x46, 0x3e, 0x23, 0x61, 0xd7, 0xd6, 0x35,
+	0x55, 0x8a, 0x35, 0x0c, 0xf4, 0x75, 0x40, 0x7a, 0x1a, 0x53, 0x23, 0xb4, 0xef, 0xab, 0x07, 0xfb,
+	0x55, 0xb4, 0x9e, 0x82, 0x1e, 0xee, 0x57, 0x4f, 0xd0, 0xd2, 0x55, 0x8f, 0x5e, 0x7f, 0xe3, 0x78,
+	0x88, 0x19, 0x84, 0xd0, 0x3d, 0x98, 0xa6, 0xa5, 0x6c, 0x47, 0xc9, 0xb8, 0xc1, 0xfc, 0xca, 0xfa,
+	0xf2, 0xc1, 0x7e, 0x75, 0x7a, 0x3d, 0x01, 0xcb, 0x23, 0x9d, 0x22, 0x92, 0x91, 0x0c, 0x6f, 0xa4,
+	0xdf, 0x64, 0x78, 0xf6, 0x77, 0x2d, 0x38, 0x4b, 0x0f, 0xb8, 0xe6, 0xad, 0x1c, 0x2b, 0xba, 0xd3,
+	0x71, 0xb9, 0x9d, 0x46, 0x1c, 0x35, 0x4c, 0x57, 0x57, 0x5b, 0xe5, 0x56, 0x1a, 0x05, 0xa5, 0x1c,
+	0x7e, 0xc7, 0xf5, 0x9a, 0x49, 0x0e, 0x7f, 0xd3, 0xf5, 0x9a, 0x98, 0x41, 0xd4, 0x91, 0x55, 0xce,
+	0xcd, 0x43, 0xf0, 0x2b, 0x74, 0xaf, 0xd2, 0xbe, 0xfc, 0x40, 0xbb, 0x81, 0x5e, 0xd6, 0x6d, 0xaa,
+	0xc2, 0x7d, 0x32, 0xd7, 0x9e, 0xfa, 0x1d, 0x0b, 0xc4, 0xeb, 0xf7, 0x3e, 0xce, 0xe4, 0xaf, 0xc2,
+	0xf8, 0x6e, 0x3a, 0xc5, 0xf3, 0xc5, 0xfc, 0x70, 0x00, 0x22, 0xb1, 0xb3, 0x12, 0xd1, 0x8d, 0x74,
+	0xce, 0x06, 0x2d, 0xbb, 0x09, 0x02, 0xba, 0x44, 0x98, 0x55, 0xa3, 0x77, 0x6f, 0x5e, 0x03, 0x68,
+	0x32, 0x5c, 0x96, 0xec, 0xac, 0x64, 0x4a, 0x5c, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x17, 0xca,
+	0x30, 0x26, 0x53, 0x0a, 0x77, 0xbd, 0x7e, 0x74, 0x8f, 0xba, 0xe0, 0x54, 0xea, 0x29, 0x38, 0x7d,
+	0x08, 0x33, 0x01, 0x69, 0x74, 0x83, 0xd0, 0xdd, 0x25, 0x12, 0x2c, 0x36, 0xc9, 0x1c, 0x4f, 0x83,
+	0x91, 0x00, 0x1e, 0xb2, 0xd0, 0x5d, 0x89, 0x42, 0x66, 0x34, 0x4e, 0x13, 0x42, 0x57, 0x60, 0x94,
+	0xa9, 0xde, 0x6b, 0xb1, 0x42, 0x58, 0x29, 0xbe, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xec, 0x72, 0xd0,
+	0xbd, 0xaf, 0x65, 0xa2, 0x8b, 0x2f, 0x07, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x32, 0x4c, 0xf3, 0x7a,
+	0x81, 0xdf, 0x71, 0xb6, 0xb8, 0x49, 0x70, 0x50, 0x85, 0xd7, 0x99, 0x5e, 0x4b, 0xc0, 0x0e, 0xf7,
+	0xab, 0x27, 0x93, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf3, 0x8f, 0x37, 0x42, 0xcf, 0x8c, 0x94,
+	0xc3, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x27, 0x16, 0xcc, 0x68, 0x53, 0xd5, 0x77, 0x26, 0x12,
+	0x63, 0x90, 0x4a, 0x7d, 0x0c, 0xd2, 0xd1, 0xa2, 0x3d, 0x64, 0xce, 0xf0, 0xc0, 0x13, 0x9a, 0x61,
+	0xfb, 0x9b, 0x80, 0xd2, 0xf9, 0xaa, 0xd1, 0xfb, 0xdc, 0x91, 0xdf, 0x0d, 0x48, 0xb3, 0xc8, 0xe0,
+	0xaf, 0x47, 0xce, 0x91, 0x2f, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x7f, 0x32, 0x00, 0xd3, 0xc9,
+	0x58, 0x1d, 0xe8, 0x06, 0x0c, 0x71, 0x29, 0x5d, 0x90, 0x2f, 0xf0, 0x27, 0xd3, 0x22, 0x7c, 0xf0,
+	0x2c, 0x41, 0x5c, 0xba, 0x17, 0xf5, 0xd1, 0x87, 0x30, 0xd6, 0xf4, 0x1f, 0x78, 0x0f, 0x9c, 0xa0,
+	0x39, 0x5f, 0x5b, 0x15, 0x1c, 0x22, 0x53, 0x01, 0xb5, 0x14, 0xa3, 0xe9, 0x51, 0x43, 0x98, 0xef,
+	0x44, 0x0c, 0xc2, 0x3a, 0x39, 0xb4, 0xc1, 0x12, 0x57, 0x6d, 0xba, 0x5b, 0x6b, 0x4e, 0xa7, 0xe8,
+	0x55, 0xd7, 0xa2, 0x44, 0xd2, 0x28, 0x4f, 0x88, 0xec, 0x56, 0x1c, 0x80, 0x63, 0x42, 0xe8, 0x47,
+	0xe1, 0x44, 0x98, 0x63, 0x12, 0xcb, 0x71, 0x38, 0x28, 0xb4, 0x12, 0x71, 0x65, 0x4a, 0x96, 0xf1,
+	0x2c, 0xab, 0x19, 0xf4, 0x10, 0x90, 0x50, 0x3d, 0x6f, 0x04, 0xdd, 0x30, 0xe2, 0x29, 0x20, 0xc5,
+	0xa5, 0xeb, 0x73, 0xd9, 0x7a, 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x0b, 0x9c, 0x9c, 0xc6, 0xc0, 0x19,
+	0x6d, 0xa0, 0x6d, 0x98, 0xec, 0x18, 0xd9, 0x37, 0xd9, 0xde, 0xcc, 0x89, 0x2e, 0x9c, 0x97, 0xa7,
+	0x93, 0x9f, 0xd2, 0x26, 0x14, 0x27, 0xe8, 0xda, 0xdf, 0x19, 0x80, 0x59, 0x99, 0x8a, 0x3e, 0xe3,
+	0x9d, 0xcc, 0xb7, 0xad, 0xc4, 0x43, 0x99, 0xb7, 0xf2, 0x8f, 0x94, 0xa7, 0xf6, 0x5c, 0xe6, 0x27,
+	0xd3, 0xcf, 0x65, 0xde, 0x39, 0x62, 0x37, 0x9e, 0xd8, 0xa3, 0x99, 0x1f, 0xda, 0x97, 0x2e, 0x07,
+	0x27, 0xc1, 0x10, 0x02, 0x10, 0xe6, 0xf1, 0xef, 0x6b, 0xd2, 0x48, 0x95, 0xa3, 0x68, 0xb8, 0x21,
+	0x70, 0x0c, 0xb1, 0x62, 0x5c, 0x46, 0xc9, 0x67, 0x1c, 0x5d, 0xd1, 0xa1, 0x34, 0x49, 0xbb, 0x13,
+	0xed, 0x2d, 0xb9, 0x81, 0xe8, 0x71, 0x26, 0xcd, 0x65, 0x81, 0x93, 0xa6, 0x29, 0x21, 0x58, 0xd1,
+	0x41, 0xbb, 0x30, 0xb3, 0xc5, 0x62, 0x4b, 0x69, 0x59, 0xe1, 0x05, 0x07, 0xca, 0xe4, 0x10, 0xd7,
+	0x17, 0x97, 0xf3, 0x53, 0xc8, 0xf3, 0x6b, 0x66, 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa4,
+	0xf3, 0x20, 0x5c, 0x6e, 0x39, 0x61, 0xe4, 0x36, 0x16, 0x5a, 0x7e, 0x63, 0xa7, 0x1e, 0xf9, 0x81,
+	0xcc, 0x2a, 0x9a, 0x79, 0xcb, 0x9b, 0xbf, 0x57, 0x4f, 0xe1, 0x1b, 0xcd, 0xb3, 0xec, 0xb6, 0x59,
+	0x58, 0x38, 0xb3, 0x2d, 0xb4, 0x0e, 0xc3, 0x5b, 0x6e, 0x84, 0x49, 0xc7, 0x17, 0x7c, 0x29, 0x93,
+	0xe9, 0x5e, 0xe7, 0x28, 0x46, 0x4b, 0x2c, 0xf6, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xaf, 0x8e,
+	0x9b, 0xa1, 0x7c, 0x55, 0x6f, 0xda, 0xcb, 0x2f, 0xf3, 0xc0, 0x79, 0x17, 0xca, 0xde, 0x66, 0x58,
+	0x14, 0xf5, 0x67, 0x7d, 0xc5, 0xd0, 0xd4, 0x2d, 0x0c, 0xd3, 0x4b, 0xf8, 0xfa, 0x4a, 0x1d, 0xd3,
+	0x8a, 0xec, 0x81, 0x6d, 0xd8, 0x08, 0x5d, 0x91, 0xbc, 0x2b, 0xf3, 0xbd, 0xf1, 0x6a, 0x7d, 0xb1,
+	0xbe, 0x6a, 0xd0, 0x60, 0xf1, 0x13, 0x59, 0x31, 0xe6, 0xd5, 0xd1, 0x5d, 0x18, 0xdd, 0xe2, 0x2c,
+	0x76, 0x93, 0x87, 0xb5, 0xcd, 0x39, 0xf6, 0xae, 0x4b, 0x24, 0x83, 0x1e, 0x3b, 0x9c, 0x14, 0x08,
+	0xc7, 0xa4, 0xd0, 0x77, 0x2c, 0x38, 0xd5, 0x49, 0xe8, 0x6a, 0xd9, 0xb3, 0x38, 0xe1, 0x10, 0x97,
+	0xf9, 0xd4, 0xa0, 0x96, 0x55, 0xc1, 0x68, 0x90, 0x19, 0x7a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74,
+	0xa0, 0x83, 0xfb, 0xcd, 0xa2, 0x7c, 0x4f, 0x89, 0x10, 0x48, 0x7c, 0xa0, 0xf1, 0xc2, 0x12, 0xa6,
+	0x15, 0xd1, 0x06, 0xc0, 0x66, 0x8b, 0x88, 0xd8, 0x92, 0xc2, 0xfd, 0x2a, 0x53, 0xce, 0x58, 0x51,
+	0x58, 0x82, 0x0e, 0xbb, 0xf3, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xc3, 0xf5, 0x9a, 0x24,
+	0x60, 0x66, 0xb4, 0x9c, 0xa5, 0xb4, 0xc8, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1,
+	0x22, 0x9d, 0xed, 0xcd, 0xb0, 0x28, 0xb3, 0xc8, 0x22, 0xe9, 0x6c, 0x27, 0x16, 0x14, 0xa7, 0xc5,
+	0xca, 0xb1, 0xa0, 0x40, 0xb7, 0xcc, 0x26, 0xdd, 0x40, 0x24, 0xa8, 0x4c, 0xe5, 0x6f, 0x99, 0x15,
+	0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x0d, 0x53, 0xae, 0x9a, 0x66, 0x34, 0x5f,
+	0xee, 0x21, 0x57, 0x19, 0x74, 0x8b, 0x25, 0xab, 0xb7, 0xa0, 0xb4, 0xd9, 0x60, 0xe6, 0xb7, 0x1c,
+	0xeb, 0xc4, 0xca, 0xa2, 0x41, 0x8d, 0x45, 0xea, 0x5f, 0x59, 0xc4, 0xa5, 0xcd, 0x06, 0x5d, 0xfa,
+	0xce, 0xa3, 0x6e, 0x40, 0x56, 0xdc, 0x16, 0x11, 0xa1, 0x83, 0x33, 0x97, 0xfe, 0xbc, 0x44, 0x4a,
+	0x2f, 0x7d, 0x05, 0xc2, 0x31, 0x29, 0x4a, 0x37, 0x96, 0xf6, 0x4e, 0xe4, 0xd3, 0x55, 0x42, 0x5d,
+	0x9a, 0x6e, 0xa6, 0xbc, 0xb7, 0x03, 0x13, 0xbb, 0x61, 0x67, 0x9b, 0x48, 0xae, 0xc8, 0x0c, 0x83,
+	0x39, 0x31, 0x31, 0xee, 0x0a, 0x44, 0x37, 0x88, 0xba, 0x4e, 0x2b, 0xc5, 0xc8, 0x99, 0x12, 0xe7,
+	0xae, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0x42, 0xf8, 0x88, 0x07, 0xae, 0x63, 0x26, 0xc2, 0x9c, 0x85,
+	0x90, 0x11, 0xdb, 0x8e, 0x2f, 0x04, 0x01, 0xc0, 0x92, 0x88, 0x1a, 0x6c, 0x76, 0x00, 0x9d, 0xee,
+	0x31, 0xd8, 0xa9, 0xfe, 0xc6, 0x83, 0xcd, 0x0e, 0x9c, 0x98, 0x14, 0x3b, 0x68, 0x3a, 0xdb, 0x7e,
+	0xe4, 0x7b, 0x89, 0x43, 0xee, 0x4c, 0xfe, 0x41, 0x53, 0xcb, 0xc0, 0x4f, 0x1f, 0x34, 0x59, 0x58,
+	0x38, 0xb3, 0x2d, 0xfa, 0x71, 0x1d, 0x19, 0x83, 0x50, 0x64, 0x42, 0x79, 0x29, 0x27, 0x84, 0x67,
+	0x3a, 0x50, 0x21, 0xff, 0x38, 0x05, 0xc2, 0x31, 0x29, 0xd4, 0xa4, 0x92, 0xae, 0x1e, 0xdb, 0x96,
+	0x65, 0x74, 0xc9, 0x91, 0x0b, 0xb2, 0xa2, 0xe0, 0x4a, 0x29, 0x57, 0x87, 0xe0, 0x04, 0x4d, 0xe6,
+	0x23, 0xc8, 0x1f, 0x15, 0xb2, 0x84, 0x2f, 0x39, 0x53, 0x9d, 0xf1, 0xee, 0x90, 0x4f, 0xb5, 0x00,
+	0x60, 0x49, 0x84, 0x8e, 0x86, 0x78, 0x0a, 0xe7, 0x87, 0x2c, 0x6f, 0x52, 0x9e, 0x29, 0x3f, 0xcb,
+	0x20, 0x25, 0x03, 0xcd, 0x0b, 0x10, 0x8e, 0x49, 0x51, 0x4e, 0x4e, 0x0f, 0xbc, 0x73, 0xf9, 0x9c,
+	0x3c, 0x79, 0xdc, 0x31, 0x4e, 0x4e, 0x0f, 0xbb, 0xb2, 0x38, 0xea, 0x54, 0x5c, 0x74, 0x96, 0xf3,
+	0x25, 0xa7, 0x5f, 0x2a, 0xb0, 0x7a, 0xba, 0x5f, 0x0a, 0x84, 0x63, 0x52, 0xec, 0x28, 0x66, 0x41,
+	0xf0, 0x2e, 0x14, 0x1c, 0xc5, 0x14, 0x21, 0xe3, 0x28, 0xd6, 0x82, 0xe4, 0xd9, 0x7f, 0xb9, 0x04,
+	0x17, 0x8a, 0xf7, 0x6d, 0x6c, 0xad, 0xab, 0xc5, 0xde, 0x51, 0x09, 0x6b, 0x1d, 0xd7, 0x1d, 0xc5,
+	0x58, 0x7d, 0x87, 0x36, 0xbe, 0x0e, 0x33, 0xea, 0xe1, 0x63, 0xcb, 0x6d, 0xec, 0x69, 0x89, 0x5e,
+	0x55, 0x10, 0xa0, 0x7a, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x79, 0x98, 0x32, 0x0a, 0x57, 0x97, 0x84,
+	0xa2, 0x21, 0xce, 0x56, 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0xce, 0xf0, 0x40, 0xbc,
+	0xa4, 0x59, 0xf3, 0x9b, 0x52, 0xa3, 0x70, 0xa4, 0xc8, 0xbd, 0x9b, 0x30, 0xd5, 0x31, 0xab, 0xf6,
+	0x08, 0x36, 0xae, 0xa3, 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x25, 0x38, 0x5f,
+	0xe8, 0xc9, 0x8f, 0x30, 0x9c, 0xde, 0x6a, 0x87, 0xce, 0x62, 0x40, 0x9a, 0xc4, 0x8b, 0x5c, 0xa7,
+	0x55, 0xef, 0x90, 0x86, 0x66, 0x6f, 0x65, 0x2e, 0xf1, 0xd7, 0xd7, 0xea, 0xf3, 0x69, 0x0c, 0x9c,
+	0x53, 0x13, 0xad, 0x00, 0x4a, 0x43, 0xc4, 0x0c, 0xb3, 0xcb, 0x74, 0x9a, 0x1e, 0xce, 0xa8, 0x81,
+	0xbe, 0x00, 0x13, 0xea, 0x85, 0x80, 0x36, 0xe3, 0xec, 0x80, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8,
+	0x2a, 0x4f, 0x63, 0x25, 0x12, 0x9e, 0x09, 0xe3, 0xec, 0x94, 0xcc, 0x51, 0x25, 0x8a, 0xb1, 0x8e,
+	0xb3, 0x70, 0xed, 0x77, 0xfe, 0xf0, 0xc2, 0x67, 0x7e, 0xf7, 0x0f, 0x2f, 0x7c, 0xe6, 0x0f, 0xfe,
+	0xf0, 0xc2, 0x67, 0x7e, 0xfc, 0xe0, 0x82, 0xf5, 0x3b, 0x07, 0x17, 0xac, 0xdf, 0x3d, 0xb8, 0x60,
+	0xfd, 0xc1, 0xc1, 0x05, 0xeb, 0x7f, 0x3b, 0xb8, 0x60, 0x7d, 0xef, 0x7f, 0xbf, 0xf0, 0x99, 0xaf,
+	0xa2, 0x38, 0x16, 0xf6, 0x15, 0x3a, 0x3b, 0x57, 0x76, 0xaf, 0xfe, 0x87, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0xba, 0xfb, 0xfc, 0xdd, 0x18, 0x2e, 0x01, 0x00,
 }
 
 func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -9346,6 +9549,22 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.RestartPolicyRules) > 0 {
+		for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xca
+		}
+	}
 	if m.RestartPolicy != nil {
 		i -= len(*m.RestartPolicy)
 		copy(dAtA[i:], *m.RestartPolicy)
@@ -9600,6 +9819,44 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *ContainerExtendedResourceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerExtendedResourceRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerExtendedResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.RequestName)
+	copy(dAtA[i:], m.RequestName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestName)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.ResourceName)
+	copy(dAtA[i:], m.ResourceName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ContainerName)
+	copy(dAtA[i:], m.ContainerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *ContainerImage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -9712,6 +9969,81 @@ func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *ContainerRestartRule) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerRestartRule) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerRestartRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ExitCodes != nil {
+		{
+			size, err := m.ExitCodes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Action)
+	copy(dAtA[i:], m.Action)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerRestartRuleOnExitCodes) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerRestartRuleOnExitCodes) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerRestartRuleOnExitCodes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx]))
+			i--
+			dAtA[i] = 0x10
+		}
+	}
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *ContainerState) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -10681,6 +11013,18 @@ func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.FileKeyRef != nil {
+		{
+			size, err := m.FileKeyRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
 	if m.SecretKeyRef != nil {
 		{
 			size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i])
@@ -10790,6 +11134,22 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error
 	_ = i
 	var l int
 	_ = l
+	if len(m.RestartPolicyRules) > 0 {
+		for iNdEx := len(m.RestartPolicyRules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RestartPolicyRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xca
+		}
+	}
 	if m.RestartPolicy != nil {
 		i -= len(*m.RestartPolicy)
 		copy(dAtA[i:], *m.RestartPolicy)
@@ -11426,6 +11786,54 @@ func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *FileKeySelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FileKeySelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FileKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -15791,6 +16199,59 @@ func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *PodCertificateProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCertificateProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CertificateChainPath)
+	copy(dAtA[i:], m.CertificateChainPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.KeyPath)
+	copy(dAtA[i:], m.KeyPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyPath)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.CredentialBundlePath)
+	copy(dAtA[i:], m.CredentialBundlePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CredentialBundlePath)))
+	i--
+	dAtA[i] = 0x22
+	if m.MaxExpirationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
+		i--
+		dAtA[i] = 0x18
+	}
+	i -= len(m.KeyType)
+	copy(dAtA[i:], m.KeyType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SignerName)
+	copy(dAtA[i:], m.SignerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *PodCondition) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -16016,6 +16477,48 @@ func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *PodExtendedResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodExtendedResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodExtendedResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ResourceClaimName)
+	copy(dAtA[i:], m.ResourceClaimName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClaimName)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.RequestMappings) > 0 {
+		for iNdEx := len(m.RequestMappings) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RequestMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
 func (m *PodIP) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -16597,6 +17100,15 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.HostnameOverride != nil {
+		i -= len(*m.HostnameOverride)
+		copy(dAtA[i:], *m.HostnameOverride)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HostnameOverride)))
+		i--
+		dAtA[i] = 0x2
+		i--
+		dAtA[i] = 0xca
+	}
 	if m.Resources != nil {
 		{
 			size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
@@ -17085,6 +17597,20 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.ExtendedResourceClaimStatus != nil {
+		{
+			size, err := m.ExtendedResourceClaimStatus.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x92
+	}
 	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
 	i--
 	dAtA[i] = 0x1
@@ -21108,6 +21634,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.PodCertificate != nil {
+		{
+			size, err := m.PodCertificate.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
 	if m.ClusterTrustBundle != nil {
 		{
 			size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i])
@@ -22471,6 +23009,27 @@ func (m *Container) Size() (n int) {
 		l = len(*m.RestartPolicy)
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if len(m.RestartPolicyRules) > 0 {
+		for _, e := range m.RestartPolicyRules {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerExtendedResourceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RequestName)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
@@ -22520,6 +23079,37 @@ func (m *ContainerResizePolicy) Size() (n int) {
 	return n
 }
 
+func (m *ContainerRestartRule) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Action)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ExitCodes != nil {
+		l = m.ExitCodes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerRestartRuleOnExitCodes) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Values) > 0 {
+		for _, e := range m.Values {
+			n += 1 + sovGenerated(uint64(e))
+		}
+	}
+	return n
+}
+
 func (m *ContainerState) Size() (n int) {
 	if m == nil {
 		return 0
@@ -22896,6 +23486,10 @@ func (m *EnvVarSource) Size() (n int) {
 		l = m.SecretKeyRef.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.FileKeyRef != nil {
+		l = m.FileKeyRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -23007,6 +23601,12 @@ func (m *EphemeralContainerCommon) Size() (n int) {
 		l = len(*m.RestartPolicy)
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if len(m.RestartPolicyRules) > 0 {
+		for _, e := range m.RestartPolicyRules {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
 	return n
 }
 
@@ -23149,6 +23749,24 @@ func (m *FCVolumeSource) Size() (n int) {
 	return n
 }
 
+func (m *FileKeySelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
 func (m *FlexPersistentVolumeSource) Size() (n int) {
 	if m == nil {
 		return 0
@@ -24752,6 +25370,28 @@ func (m *PodAttachOptions) Size() (n int) {
 	return n
 }
 
+func (m *PodCertificateProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SignerName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KeyType)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.MaxExpirationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
+	}
+	l = len(m.CredentialBundlePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KeyPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.CertificateChainPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *PodCondition) Size() (n int) {
 	if m == nil {
 		return 0
@@ -24837,6 +25477,23 @@ func (m *PodExecOptions) Size() (n int) {
 	return n
 }
 
+func (m *PodExtendedResourceClaimStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.RequestMappings) > 0 {
+		for _, e := range m.RequestMappings {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.ResourceClaimName)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *PodIP) Size() (n int) {
 	if m == nil {
 		return 0
@@ -25224,6 +25881,10 @@ func (m *PodSpec) Size() (n int) {
 		l = m.Resources.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if m.HostnameOverride != nil {
+		l = len(*m.HostnameOverride)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -25296,6 +25957,10 @@ func (m *PodStatus) Size() (n int) {
 		}
 	}
 	n += 2 + sovGenerated(uint64(m.ObservedGeneration))
+	if m.ExtendedResourceClaimStatus != nil {
+		l = m.ExtendedResourceClaimStatus.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -26751,6 +27416,10 @@ func (m *VolumeProjection) Size() (n int) {
 		l = m.ClusterTrustBundle.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.PodCertificate != nil {
+		l = m.PodCertificate.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -27426,6 +28095,11 @@ func (this *Container) String() string {
 		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
 	}
 	repeatedStringForResizePolicy += "}"
+	repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{"
+	for _, f := range this.RestartPolicyRules {
+		repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRestartPolicyRules += "}"
 	s := strings.Join([]string{`&Container{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
@@ -27451,6 +28125,19 @@ func (this *Container) String() string {
 		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
 		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
 		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
+		`RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerExtendedResourceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerExtendedResourceRequest{`,
+		`ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`,
+		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+		`RequestName:` + fmt.Sprintf("%v", this.RequestName) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -27491,6 +28178,28 @@ func (this *ContainerResizePolicy) String() string {
 	}, "")
 	return s
 }
+func (this *ContainerRestartRule) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerRestartRule{`,
+		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
+		`ExitCodes:` + strings.Replace(this.ExitCodes.String(), "ContainerRestartRuleOnExitCodes", "ContainerRestartRuleOnExitCodes", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerRestartRuleOnExitCodes) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerRestartRuleOnExitCodes{`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *ContainerState) String() string {
 	if this == nil {
 		return "nil"
@@ -27777,6 +28486,7 @@ func (this *EnvVarSource) String() string {
 		`ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`,
 		`ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`,
 		`SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`,
+		`FileKeyRef:` + strings.Replace(this.FileKeyRef.String(), "FileKeySelector", "FileKeySelector", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -27826,6 +28536,11 @@ func (this *EphemeralContainerCommon) String() string {
 		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
 	}
 	repeatedStringForResizePolicy += "}"
+	repeatedStringForRestartPolicyRules := "[]ContainerRestartRule{"
+	for _, f := range this.RestartPolicyRules {
+		repeatedStringForRestartPolicyRules += strings.Replace(strings.Replace(f.String(), "ContainerRestartRule", "ContainerRestartRule", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRestartPolicyRules += "}"
 	s := strings.Join([]string{`&EphemeralContainerCommon{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
@@ -27851,6 +28566,7 @@ func (this *EphemeralContainerCommon) String() string {
 		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
 		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
 		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
+		`RestartPolicyRules:` + repeatedStringForRestartPolicyRules + `,`,
 		`}`,
 	}, "")
 	return s
@@ -27951,6 +28667,19 @@ func (this *FCVolumeSource) String() string {
 	}, "")
 	return s
 }
+func (this *FileKeySelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&FileKeySelector{`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *FlexPersistentVolumeSource) String() string {
 	if this == nil {
 		return "nil"
@@ -29169,6 +29898,21 @@ func (this *PodAttachOptions) String() string {
 	}, "")
 	return s
 }
+func (this *PodCertificateProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodCertificateProjection{`,
+		`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+		`KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`,
+		`MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
+		`CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`,
+		`KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`,
+		`CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *PodCondition) String() string {
 	if this == nil {
 		return "nil"
@@ -29228,6 +29972,22 @@ func (this *PodExecOptions) String() string {
 	}, "")
 	return s
 }
+func (this *PodExtendedResourceClaimStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRequestMappings := "[]ContainerExtendedResourceRequest{"
+	for _, f := range this.RequestMappings {
+		repeatedStringForRequestMappings += strings.Replace(strings.Replace(f.String(), "ContainerExtendedResourceRequest", "ContainerExtendedResourceRequest", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRequestMappings += "}"
+	s := strings.Join([]string{`&PodExtendedResourceClaimStatus{`,
+		`RequestMappings:` + repeatedStringForRequestMappings + `,`,
+		`ResourceClaimName:` + fmt.Sprintf("%v", this.ResourceClaimName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *PodIP) String() string {
 	if this == nil {
 		return "nil"
@@ -29503,6 +30263,7 @@ func (this *PodSpec) String() string {
 		`SchedulingGates:` + repeatedStringForSchedulingGates + `,`,
 		`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
 		`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+		`HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -29564,6 +30325,7 @@ func (this *PodStatus) String() string {
 		`ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`,
 		`HostIPs:` + repeatedStringForHostIPs + `,`,
 		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+		`ExtendedResourceClaimStatus:` + strings.Replace(this.ExtendedResourceClaimStatus.String(), "PodExtendedResourceClaimStatus", "PodExtendedResourceClaimStatus", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -30673,6 +31435,7 @@ func (this *VolumeProjection) String() string {
 		`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`,
 		`ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`,
 		`ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`,
+		`PodCertificate:` + strings.Replace(this.PodCertificate.String(), "PodCertificateProjection", "PodCertificateProjection", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -36465,61 +37228,11 @@ func (m *Container) Unmarshal(dAtA []byte) error {
 			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
 			m.RestartPolicy = &s
 			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ContainerImage) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
+		case 25:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -36529,43 +37242,26 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Names = append(m.Names, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
-			}
-			m.SizeBytes = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.SizeBytes |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
+			m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{})
+			if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
 			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -36587,7 +37283,7 @@ func (m *ContainerImage) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *ContainerPort) Unmarshal(dAtA []byte) error {
+func (m *ContainerExtendedResourceRequest) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -36610,15 +37306,15 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
+			return fmt.Errorf("proto: ContainerExtendedResourceRequest: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: ContainerExtendedResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -36646,49 +37342,11 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = string(dAtA[iNdEx:postIndex])
+			m.ContainerName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
-			}
-			m.HostPort = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.HostPort |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
-			}
-			m.ContainerPort = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ContainerPort |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -36716,11 +37374,11 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			m.ResourceName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
-		case 5:
+		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestName", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -36748,7 +37406,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.HostIP = string(dAtA[iNdEx:postIndex])
+			m.RequestName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -36771,7 +37429,7 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
+func (m *ContainerImage) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -36794,15 +37452,15 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group")
+			return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -36830,13 +37488,13 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ResourceName = ResourceName(dAtA[iNdEx:postIndex])
+			m.Names = append(m.Names, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
 			}
-			var stringLen uint64
+			m.SizeBytes = 0
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -36846,24 +37504,585 @@ func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				m.SizeBytes |= int64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerPort) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
+			}
+			m.HostPort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HostPort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
+			}
+			m.ContainerPort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ContainerPort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HostIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceName = ResourceName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerRestartRule) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerRestartRule: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerRestartRule: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Action = ContainerRestartRuleAction(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitCodes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ExitCodes == nil {
+				m.ExitCodes = &ContainerRestartRuleOnExitCodes{}
+			}
+			if err := m.ExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerRestartRuleOnExitCodes) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerRestartRuleOnExitCodes: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = ContainerRestartRuleOnExitCodesOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType == 0 {
+				var v int32
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= int32(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Values = append(m.Values, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Values) == 0 {
+					m.Values = make([]int32, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v int32
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= int32(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Values = append(m.Values, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -38790,13 +40009,196 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.IP = string(dAtA[iNdEx:postIndex])
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TargetRef == nil {
+				m.TargetRef = &ObjectReference{}
+			}
+			if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.NodeName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EndpointPort) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType)
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
 			}
-			var msglen int
+			m.Port = 0
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -38806,31 +40208,14 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= int(b&0x7F) << shift
+				m.Port |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.TargetRef == nil {
-				m.TargetRef = &ObjectReference{}
-			}
-			if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -38858,11 +40243,11 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Hostname = string(dAtA[iNdEx:postIndex])
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -38891,7 +40276,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
 				return io.ErrUnexpectedEOF
 			}
 			s := string(dAtA[iNdEx:postIndex])
-			m.NodeName = &s
+			m.AppProtocol = &s
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -38914,7 +40299,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *EndpointPort) Unmarshal(dAtA []byte) error {
+func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -38937,17 +40322,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+			return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -38957,48 +40342,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = string(dAtA[iNdEx:postIndex])
+			m.Addresses = append(m.Addresses, EndpointAddress{})
+			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
-			}
-			m.Port = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Port |= int32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -39008,29 +40376,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{})
+			if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
-		case 4:
+		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -39040,24 +40410,25 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			s := string(dAtA[iNdEx:postIndex])
-			m.AppProtocol = &s
+			m.Ports = append(m.Ports, EndpointPort{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -39080,7 +40451,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
+func (m *Endpoints) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -39103,15 +40474,15 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group")
+			return fmt.Errorf("proto: Endpoints: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39138,48 +40509,13 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Addresses = append(m.Addresses, EndpointAddress{})
-			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{})
-			if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39206,8 +40542,8 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Ports = append(m.Ports, EndpointPort{})
-			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.Subsets = append(m.Subsets, EndpointSubset{})
+			if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -39232,7 +40568,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *Endpoints) Unmarshal(dAtA []byte) error {
+func (m *EndpointsList) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -39255,15 +40591,15 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: Endpoints: wiretype end group for non-group")
+			return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39290,13 +40626,13 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39323,8 +40659,8 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Subsets = append(m.Subsets, EndpointSubset{})
-			if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.Items = append(m.Items, Endpoints{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -39349,7 +40685,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *EndpointsList) Unmarshal(dAtA []byte) error {
+func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -39372,15 +40708,47 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group")
+			return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Prefix = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39407,13 +40775,16 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if m.ConfigMapRef == nil {
+				m.ConfigMapRef = &ConfigMapEnvSource{}
+			}
+			if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 2:
+		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39440,8 +40811,10 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Items = append(m.Items, Endpoints{})
-			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretEnvSource{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -39466,7 +40839,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
+func (m *EnvVar) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -39489,15 +40862,15 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group")
+			return fmt.Errorf("proto: EnvVar: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -39525,13 +40898,13 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Prefix = string(dAtA[iNdEx:postIndex])
+			m.Name = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
 			}
-			var msglen int
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -39541,31 +40914,27 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			if msglen < 0 {
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + msglen
+			postIndex := iNdEx + intStringLen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ConfigMapRef == nil {
-				m.ConfigMapRef = &ConfigMapEnvSource{}
-			}
-			if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
+			m.Value = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39592,10 +40961,10 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.SecretRef == nil {
-				m.SecretRef = &SecretEnvSource{}
+			if m.ValueFrom == nil {
+				m.ValueFrom = &EnvVarSource{}
 			}
-			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -39620,7 +40989,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *EnvVar) Unmarshal(dAtA []byte) error {
+func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -39643,79 +41012,15 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: EnvVar: wiretype end group for non-group")
+			return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Value = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39742,66 +41047,16 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ValueFrom == nil {
-				m.ValueFrom = &EnvVarSource{}
+			if m.FieldRef == nil {
+				m.FieldRef = &ObjectFieldSelector{}
 			}
-			if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
+		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39828,16 +41083,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.FieldRef == nil {
-				m.FieldRef = &ObjectFieldSelector{}
+			if m.ResourceFieldRef == nil {
+				m.ResourceFieldRef = &ResourceFieldSelector{}
 			}
-			if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 2:
+		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39864,16 +41119,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ResourceFieldRef == nil {
-				m.ResourceFieldRef = &ResourceFieldSelector{}
+			if m.ConfigMapKeyRef == nil {
+				m.ConfigMapKeyRef = &ConfigMapKeySelector{}
 			}
-			if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 3:
+		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39900,16 +41155,16 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ConfigMapKeyRef == nil {
-				m.ConfigMapKeyRef = &ConfigMapKeySelector{}
+			if m.SecretKeyRef == nil {
+				m.SecretKeyRef = &SecretKeySelector{}
 			}
-			if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 4:
+		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field FileKeyRef", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -39936,10 +41191,10 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.SecretKeyRef == nil {
-				m.SecretKeyRef = &SecretKeySelector{}
+			if m.FileKeyRef == nil {
+				m.FileKeyRef = &FileKeySelector{}
 			}
-			if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.FileKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -40700,46 +41955,116 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.EnvFrom = append(m.EnvFrom, EnvFromSource{})
-			if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 20:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
+			m.EnvFrom = append(m.EnvFrom, EnvFromSource{})
+			if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{})
+			if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartupProbe == nil {
+				m.StartupProbe = &Probe{}
 			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
+			if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
 			}
-			m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
-		case 21:
+		case 23:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -40766,16 +42091,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{})
-			if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{})
+			if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 22:
+		case 24:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
 			}
-			var msglen int
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -40785,31 +42110,28 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			if msglen < 0 {
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + msglen
+			postIndex := iNdEx + intStringLen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.StartupProbe == nil {
-				m.StartupProbe = &Probe{}
-			}
-			if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
+			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
+			m.RestartPolicy = &s
 			iNdEx = postIndex
-		case 23:
+		case 25:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicyRules", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -40836,44 +42158,11 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{})
-			if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			m.RestartPolicyRules = append(m.RestartPolicyRules, ContainerRestartRule{})
+			if err := m.RestartPolicyRules[len(m.RestartPolicyRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
-		case 24:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
-			m.RestartPolicy = &s
-			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -42113,6 +43402,173 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *FileKeySelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FileKeySelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FileKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
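
The FileKeySelector.Unmarshal method added above follows the same hand-rolled protobuf pattern as every other Unmarshal in this generated file: read a base-128 varint tag, split it into field number and wire type, then decode the varint or length-delimited payload. As an illustration only (not part of the generated code), a minimal Go sketch of the varint step:

// Hedged sketch: decodes a protobuf base-128 varint, mirroring the inline
// loops in the generated Unmarshal methods above.
package main

import (
	"errors"
	"fmt"
)

// readVarint returns the decoded value and the number of bytes consumed.
func readVarint(b []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(b); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		v |= uint64(b[i]&0x7F) << shift
		if b[i] < 0x80 { // high bit clear: last byte of the varint
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of input")
}

func main() {
	// Tag byte 0x0A encodes field number 1 with wire type 2 (length-delimited).
	v, n, err := readVarint([]byte{0x0A})
	fmt.Println(v>>3, v&0x7, n, err) // 1 2 1 <nil>
}
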
 func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -55916,17 +57372,297 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+			return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+			if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+			if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdout = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stderr = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCertificateProjection: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: PodCertificateProjection: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
 			}
-			var msglen int
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -55936,31 +57672,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			if msglen < 0 {
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + msglen
+			postIndex := iNdEx + intStringLen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
-			if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
+			m.SignerName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field KeyType", wireType)
 			}
-			var msglen int
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -55970,81 +57704,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				msglen |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			if msglen < 0 {
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + msglen
+			postIndex := iNdEx + intStringLen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
-			if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
+			m.KeyType = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
+		case 3:
 			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
 			}
-			var v int
+			var v int32
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -56054,17 +57736,17 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= int(b&0x7F) << shift
+				v |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			m.Stdin = bool(v != 0)
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			m.MaxExpirationSeconds = &v
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType)
 			}
-			var v int
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -56074,37 +57756,29 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			m.Stdout = bool(v != 0)
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
 			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
 			}
-			m.Stderr = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
 			}
-			var v int
+			m.CredentialBundlePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
+			}
+			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -56114,15 +57788,27 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= int(b&0x7F) << shift
+				stringLen |= uint64(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			m.TTY = bool(v != 0)
-		case 5:
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KeyPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -56150,7 +57836,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Container = string(dAtA[iNdEx:postIndex])
+			m.CertificateChainPath = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -56607,15 +58293,210 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group")
+			return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Value = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdout = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stderr = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -56643,11 +58524,11 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Name = string(dAtA[iNdEx:postIndex])
+			m.Container = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
-		case 2:
+		case 6:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -56675,8 +58556,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			s := string(dAtA[iNdEx:postIndex])
-			m.Value = &s
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -56699,7 +58579,7 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
+func (m *PodExtendedResourceClaimStatus) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
@@ -56722,97 +58602,17 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
 		fieldNum := int32(wire >> 3)
 		wireType := int(wire & 0x7)
 		if wireType == 4 {
-			return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group")
+			return fmt.Errorf("proto: PodExtendedResourceClaimStatus: wiretype end group for non-group")
 		}
 		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+			return fmt.Errorf("proto: PodExtendedResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
 		}
 		switch fieldNum {
 		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Stdin = bool(v != 0)
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Stdout = bool(v != 0)
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Stderr = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TTY = bool(v != 0)
-		case 5:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestMappings", wireType)
 			}
-			var stringLen uint64
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -56822,27 +58622,29 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
+				msglen |= int(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
+			if msglen < 0 {
 				return ErrInvalidLengthGenerated
 			}
-			postIndex := iNdEx + intStringLen
+			postIndex := iNdEx + msglen
 			if postIndex < 0 {
 				return ErrInvalidLengthGenerated
 			}
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Container = string(dAtA[iNdEx:postIndex])
+			m.RequestMappings = append(m.RequestMappings, ContainerExtendedResourceRequest{})
+			if err := m.RequestMappings[len(m.RequestMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
 			iNdEx = postIndex
-		case 6:
+		case 2:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -56870,7 +58672,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			m.ResourceClaimName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -60088,6 +61890,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 41:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostnameOverride", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.HostnameOverride = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -60687,6 +62522,42 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
 					break
 				}
 			}
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ExtendedResourceClaimStatus == nil {
+				m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{}
+			}
+			if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -73489,6 +75360,42 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodCertificate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodCertificate == nil {
+				m.PodCertificate = &PodCertificateProjection{}
+			}
+			if err := m.PodCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 9b48fb1c3..fb2695314 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -737,8 +737,8 @@ message Container {
   repeated ContainerPort ports = 6;
 
   // List of sources to populate environment variables in the container.
-  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
-  // will be reported as an event when the container is starting. When a key exists in multiple
+  // The keys defined within a source may consist of any printable ASCII characters except '='.
+  // When a key exists in multiple
   // sources, the value associated with the last source will take precedence.
   // Values defined by an Env with a duplicate key will take precedence.
   // Cannot be updated.
@@ -768,10 +768,10 @@ message Container {
   repeated ContainerResizePolicy resizePolicy = 23;
 
   // RestartPolicy defines the restart behavior of individual containers in a pod.
-  // This field may only be set for init containers, and the only allowed value is "Always".
-  // For non-init containers or when this field is not specified,
+  // This overrides the pod-level restart policy. When this field is not specified,
   // the restart behavior is defined by the Pod's restart policy and the container type.
-  // Setting the RestartPolicy as "Always" for the init container will have the following effect:
+  // Additionally, setting the RestartPolicy as "Always" for the init container will
+  // have the following effect:
   // this init container will be continually restarted on
   // exit until all regular containers have terminated. Once all regular
   // containers have completed, all init containers with restartPolicy "Always"
@@ -786,6 +786,22 @@ message Container {
   // +optional
   optional string restartPolicy = 24;
 
+  // Represents a list of rules to be checked to determine if the
+  // container should be restarted on exit. The rules are evaluated in
+  // order. Once a rule matches a container exit condition, the remaining
+  // rules are ignored. If no rule matches the container exit condition,
+  // the Container-level restart policy determines the whether the container
+  // is restarted or not. Constraints on the rules:
+  // - At most 20 rules are allowed.
+  // - Rules can have the same action.
+  // - Identical rules are not forbidden in validations.
+  // When rules are specified, container MUST set RestartPolicy explicitly
+  // even if it matches the Pod's RestartPolicy.
+  // +featureGate=ContainerRestartRules
+  // +optional
+  // +listType=atomic
+  repeated ContainerRestartRule restartPolicyRules = 25;
+
   // Pod volumes to mount into the container's filesystem.
   // Cannot be updated.
   // +optional
@@ -888,6 +904,19 @@ message Container {
   optional bool tty = 18;
 }
 
+// ContainerExtendedResourceRequest has the mapping of container name,
+// extended resource name to the device request name.
+message ContainerExtendedResourceRequest {
+  // The name of the container requesting resources.
+  optional string containerName = 1;
+
+  // The name of the extended resource in that container which gets backed by DRA.
+  optional string resourceName = 2;
+
+  // The name of the request in the special ResourceClaim which corresponds to the extended resource.
+  optional string requestName = 3;
+}
+
 // Describe a container image
 message ContainerImage {
   // Names by which this image is known.
@@ -942,6 +971,39 @@ message ContainerResizePolicy {
   optional string restartPolicy = 2;
 }
 
+// ContainerRestartRule describes how a container exit is handled.
+message ContainerRestartRule {
+  // Specifies the action taken on a container exit if the requirements
+  // are satisfied. The only possible value is "Restart" to restart the
+  // container.
+  // +required
+  optional string action = 1;
+
+  // Represents the exit codes to check on container exits.
+  // +optional
+  // +oneOf=when
+  optional ContainerRestartRuleOnExitCodes exitCodes = 2;
+}
+
+// ContainerRestartRuleOnExitCodes describes the condition
+// for handling an exited container based on its exit codes.
+message ContainerRestartRuleOnExitCodes {
+  // Represents the relationship between the container exit code(s) and the
+  // specified values. Possible values are:
+  // - In: the requirement is satisfied if the container exit code is in the
+  //   set of specified values.
+  // - NotIn: the requirement is satisfied if the container exit code is
+  //   not in the set of specified values.
+  // +required
+  optional string operator = 1;
+
+  // Specifies the set of values to check for container exit codes.
+  // At most 255 elements are allowed.
+  // +optional
+  // +listType=set
+  repeated int32 values = 2;
+}
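
The two messages above, together with the new restartPolicyRules field on Container, describe when a container is restarted on exit. A hedged Go sketch of how such a container might be declared; the Go type names for the rule action and operator (ContainerRestartRuleAction, ContainerRestartRuleOnExitCodesOperator) are assumed to mirror the proto definitions and are not confirmed by this patch:

// Hedged sketch: a container restarted only on exit codes 1 or 42, assuming
// k8s.io/api/core/v1 exposes Go types mirroring the messages above.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	always := corev1.ContainerRestartPolicyAlways
	c := corev1.Container{
		Name:  "worker",
		Image: "busybox",
		// RestartPolicy must be set explicitly when restartPolicyRules are
		// used, even if it matches the Pod's restart policy.
		RestartPolicy: &always,
		RestartPolicyRules: []corev1.ContainerRestartRule{{
			// "Restart" is the only allowed action.
			Action: corev1.ContainerRestartRuleAction("Restart"), // assumed type name
			ExitCodes: &corev1.ContainerRestartRuleOnExitCodes{
				Operator: corev1.ContainerRestartRuleOnExitCodesOperator("In"), // assumed type name
				Values:   []int32{1, 42},
			},
		}},
	}
	fmt.Println(c.Name, len(c.RestartPolicyRules))
}
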
+
 // ContainerState holds a possible state of container.
 // Only one of its members may be specified.
 // If none of them is specified, the default one is ContainerStateWaiting.
@@ -1344,7 +1406,8 @@ message EndpointsList {
 
 // EnvFromSource represents the source of a set of ConfigMaps or Secrets
 message EnvFromSource {
-  // Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
+  // Optional text to prepend to the name of each environment variable.
+  // May consist of any printable ASCII characters except '='.
   // +optional
   optional string prefix = 1;
 
@@ -1359,7 +1422,8 @@ message EnvFromSource {
 
 // EnvVar represents an environment variable present in a Container.
 message EnvVar {
-  // Name of the environment variable. Must be a C_IDENTIFIER.
+  // Name of the environment variable.
+  // May consist of any printable ASCII characters except '='.
   optional string name = 1;
 
   // Variable references $(VAR_NAME) are expanded
@@ -1398,6 +1462,13 @@ message EnvVarSource {
   // Selects a key of a secret in the pod's namespace
   // +optional
   optional SecretKeySelector secretKeyRef = 4;
+
+  // FileKeyRef selects a key of the env file.
+  // Requires the EnvFiles feature gate to be enabled.
+  //
+  // +featureGate=EnvFiles
+  // +optional
+  optional FileKeySelector fileKeyRef = 5;
 }
 
 // An EphemeralContainer is a temporary container that you may add to an existing Pod for
@@ -1479,8 +1550,8 @@ message EphemeralContainerCommon {
   repeated ContainerPort ports = 6;
 
   // List of sources to populate environment variables in the container.
-  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
-  // will be reported as an event when the container is starting. When a key exists in multiple
+  // The keys defined within a source may consist of any printable ASCII characters except '='.
+  // When a key exists in multiple
   // sources, the value associated with the last source will take precedence.
   // Values defined by an Env with a duplicate key will take precedence.
   // Cannot be updated.
@@ -1510,12 +1581,19 @@ message EphemeralContainerCommon {
 
   // Restart policy for the container to manage the restart behavior of each
   // container within a pod.
-  // This may only be set for init containers. You cannot set this field on
-  // ephemeral containers.
+  // You cannot set this field on ephemeral containers.
   // +featureGate=SidecarContainers
   // +optional
   optional string restartPolicy = 24;
 
+  // Represents a list of rules to be checked to determine if the
+  // container should be restarted on exit. You cannot set this field on
+  // ephemeral containers.
+  // +featureGate=ContainerRestartRules
+  // +optional
+  // +listType=atomic
+  repeated ContainerRestartRule restartPolicyRules = 25;
+
   // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
   // Cannot be updated.
   // +optional
@@ -1776,6 +1854,36 @@ message FCVolumeSource {
   repeated string wwids = 5;
 }
 
+// FileKeySelector selects a key of the env file.
+// +structType=atomic
+message FileKeySelector {
+  // The name of the volume mount containing the env file.
+  // +required
+  optional string volumeName = 1;
+
+  // The path within the volume from which to select the file.
+  // Must be relative and may not contain the '..' path or start with '..'.
+  // +required
+  optional string path = 2;
+
+  // The key within the env file. An invalid key will prevent the pod from starting.
+  // The keys defined within a source may consist of any printable ASCII characters except '='.
+  // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+  // +required
+  optional string key = 3;
+
+  // Specify whether the file or its key must be defined. If the file or key
+  // does not exist, then the env var is not published.
+  // If optional is set to true and the specified key does not exist,
+  // the environment variable will not be set in the Pod's containers.
+  //
+  // If optional is set to false and the specified key does not exist,
+  // an error will be returned during Pod creation.
+  // +optional
+  // +default=false
+  optional bool optional = 4;
+}
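
FileKeySelector is consumed through the new fileKeyRef field on EnvVarSource. A hedged Go sketch of an environment variable sourced from a key in an env file; the FileKeyRef field name on the Go EnvVarSource struct is assumed from the proto field above:

// Hedged sketch: an EnvVar populated from a key inside an env file,
// using the FileKeySelector fields defined above.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := false
	env := corev1.EnvVar{
		Name: "DB_PASSWORD",
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{ // assumed field name on EnvVarSource
				VolumeName: "config",      // volume mount that contains the env file
				Path:       "app.env",     // relative path within the volume; must not contain ".."
				Key:        "DB_PASSWORD", // key inside the env file (<=128 chars while EnvFiles is alpha)
				Optional:   &optional,
			},
		},
	}
	fmt.Println(env.Name, env.ValueFrom.FileKeyRef.Path)
}
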
+
 // FlexPersistentVolumeSource represents a generic persistent volume resource that is
 // provisioned/attached using an exec based plugin.
 message FlexPersistentVolumeSource {
@@ -1949,7 +2057,6 @@ message GlusterfsPersistentVolumeSource {
 // Glusterfs volumes do not support ownership management or SELinux relabeling.
 message GlusterfsVolumeSource {
   // endpoints is the endpoint name that details Glusterfs topology.
-  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
   optional string endpoints = 1;
 
   // path is the Glusterfs volume path.
@@ -3160,15 +3267,13 @@ message PersistentVolumeClaimSpec {
   // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
   // If specified, the CSI driver will create or update the volume with the attributes defined
   // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
-  // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
-  // will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
-  // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
-  // will be set by the persistentvolume controller if it exists.
+  // it can be changed after the claim is created. An empty string or nil value indicates that no
+  // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
+  // this field can be reset to its previous value (including nil) to cancel the modification.
   // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
   // set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
   // exists.
   // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
-  // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string volumeAttributesClassName = 9;
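
Since volumeAttributesClassName can now be cleared to cancel an Infeasible modification, a hedged Go sketch of setting and then resetting the field on a claim (illustration only):

// Hedged sketch: switches a PVC to a different VolumeAttributesClass and then
// resets the field, matching the volumeAttributesClassName semantics above.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	class := "gold" // hypothetical VolumeAttributesClass name
	pvc := corev1.PersistentVolumeClaim{
		Spec: corev1.PersistentVolumeClaimSpec{
			VolumeAttributesClassName: &class,
		},
	}
	// If the modification enters an Infeasible error state, the field can be
	// reset to its previous value (including nil) to cancel it.
	pvc.Spec.VolumeAttributesClassName = nil
	fmt.Println(pvc.Spec.VolumeAttributesClassName == nil)
}
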
@@ -3267,14 +3372,12 @@ message PersistentVolumeClaimStatus {
 
   // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
   // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
-  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string currentVolumeAttributesClassName = 8;
 
   // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
   // When this is unset, there is no ModifyVolume operation being attempted.
-  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional ModifyVolumeStatus modifyVolumeStatus = 9;
@@ -3515,7 +3618,6 @@ message PersistentVolumeSpec {
   // after a volume has been updated successfully to a new class.
   // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
   // PersistentVolumeClaims during the binding process.
-  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string volumeAttributesClassName = 10;
@@ -3684,8 +3786,8 @@ message PodAntiAffinity {
   // most preferred is the one with the greatest sum of weights, i.e.
   // for each node that meets all of the scheduling requirements (resource
   // request, requiredDuringScheduling anti-affinity expressions, etc.),
-  // compute a sum by iterating through the elements of this field and adding
-  // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+  // compute a sum by iterating through the elements of this field and subtracting
+  // "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
   // node(s) with the highest sum are the most preferred.
   // +optional
   // +listType=atomic
@@ -3725,6 +3827,79 @@ message PodAttachOptions {
   optional string container = 5;
 }
 
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
+message PodCertificateProjection {
+  // Kubelet's generated CSRs will be addressed to this signer.
+  //
+  // +required
+  optional string signerName = 1;
+
+  // The type of keypair Kubelet will generate for the pod.
+  //
+  // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+  // "ECDSAP521", and "ED25519".
+  //
+  // +required
+  optional string keyType = 2;
+
+  // maxExpirationSeconds is the maximum lifetime permitted for the
+  // certificate.
+  //
+  // Kubelet copies this value verbatim into the PodCertificateRequests it
+  // generates for this projection.
+  //
+  // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+  // will reject values shorter than 3600 (1 hour).  The maximum allowable
+  // value is 7862400 (91 days).
+  //
+  // The signer implementation is then free to issue a certificate with any
+  // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+  // seconds (1 hour).  This constraint is enforced by kube-apiserver.
+  // `kubernetes.io` signers will never issue certificates with a lifetime
+  // longer than 24 hours.
+  //
+  // +optional
+  optional int32 maxExpirationSeconds = 3;
+
+  // Write the credential bundle at this path in the projected volume.
+  //
+  // The credential bundle is a single file that contains multiple PEM blocks.
+  // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+  // key.
+  //
+  // The remaining blocks are CERTIFICATE blocks, containing the issued
+  // certificate chain from the signer (leaf and any intermediates).
+  //
+  // Using credentialBundlePath lets your Pod's application code make a single
+  // atomic read that retrieves a consistent key and certificate chain.  If you
+  // project them to separate files, your application code will need to
+  // additionally check that the leaf certificate was issued to the key.
+  //
+  // +optional
+  optional string credentialBundlePath = 4;
+
+  // Write the key at this path in the projected volume.
+  //
+  // Most applications should use credentialBundlePath.  When using keyPath
+  // and certificateChainPath, your application needs to check that the key
+  // and leaf certificate are consistent, because it is possible to read the
+  // files mid-rotation.
+  //
+  // +optional
+  optional string keyPath = 5;
+
+  // Write the certificate chain at this path in the projected volume.
+  //
+  // Most applications should use credentialBundlePath.  When using keyPath
+  // and certificateChainPath, your application needs to check that the key
+  // and leaf certificate are consistent, because it is possible to read the
+  // files mid-rotation.
+  //
+  // +optional
+  optional string certificateChainPath = 6;
+}
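
A hedged Go sketch of a projected volume that requests a pod certificate as a single credential bundle; the field names follow the message above (and the generated Unmarshal code earlier in this patch), while the signer name used here is purely hypothetical:

// Hedged sketch: projects an auto-rotating pod certificate as one PEM file
// containing the PKCS#8 private key followed by the certificate chain.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	maxExp := int32(86400) // 24 hours; must be between 3600 and 7862400 seconds
	vol := corev1.Volume{
		Name: "pod-cert",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/internal-ca", // hypothetical signer
						KeyType:              "ECDSAP256",
						MaxExpirationSeconds: &maxExp,
						// Single-file bundle so the application can read key
						// and chain in one atomic read.
						CredentialBundlePath: "tls/credentials.pem",
					},
				}},
			},
		},
	}
	fmt.Println(vol.Name, vol.VolumeSource.Projected.Sources[0].PodCertificate.SignerName)
}
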
+
 // PodCondition contains details for the current condition of this pod.
 message PodCondition {
   // Type is the type of the condition.
@@ -3829,6 +4004,20 @@ message PodExecOptions {
   repeated string command = 6;
 }
 
+// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
+// resource requests backed by DRA. It stores the generated name for
+// the corresponding special ResourceClaim created by the scheduler.
+message PodExtendedResourceClaimStatus {
+  // RequestMappings identifies the mapping of <container, extended resource> to the device request
+  // in the generated ResourceClaim.
+  // +listType=atomic
+  repeated ContainerExtendedResourceRequest requestMappings = 1;
+
+  // ResourceClaimName is the name of the ResourceClaim that was
+  // generated for the Pod in the namespace of the Pod.
+  optional string resourceClaimName = 2;
+}
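
A hedged Go sketch of how a client might read this status; ExtendedResourceClaimStatus, RequestMappings and ResourceClaimName appear in the generated code earlier in this patch, while the ContainerName/ResourceName/RequestName field names are assumed from the ContainerExtendedResourceRequest message:

// Hedged sketch: prints the DRA-backed extended resource claim status of a pod.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func printExtendedResourceClaim(status corev1.PodStatus) {
	ercs := status.ExtendedResourceClaimStatus
	if ercs == nil {
		fmt.Println("no extended resource claim generated for this pod")
		return
	}
	fmt.Println("generated ResourceClaim:", ercs.ResourceClaimName)
	for _, m := range ercs.RequestMappings {
		// Each mapping ties a (container, extended resource) pair to a
		// device request name inside the generated ResourceClaim.
		fmt.Printf("container %q resource %q -> request %q\n",
			m.ContainerName, m.ResourceName, m.RequestName)
	}
}

func main() {
	printExtendedResourceClaim(corev1.PodStatus{})
}
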
+
 // PodIP represents a single IP address allocated to the pod.
 message PodIP {
   // IP is the IP address assigned to the pod
@@ -4269,7 +4458,9 @@ message PodSpec {
   optional string nodeName = 10;
 
   // Host networking requested for this pod. Use the host's network namespace.
-  // If this option is set, the ports that will be used must be specified.
+  // When using HostNetwork you should specify ports so the scheduler is aware.
+  // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
+  // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
   // Default to false.
   // +k8s:conversion-gen=false
   // +optional
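
A hedged Go sketch of the hostPort/containerPort constraint described above for host-network pods (illustration only):

// Hedged sketch: a host-network pod where hostPort matches containerPort,
// as required when hostNetwork is true.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	spec := corev1.PodSpec{
		HostNetwork: true,
		Containers: []corev1.Container{{
			Name:  "proxy",
			Image: "busybox",
			Ports: []corev1.ContainerPort{{
				// With hostNetwork=true, hostPort must equal containerPort
				// (and defaults to it when unspecified).
				ContainerPort: 8080,
				HostPort:      8080,
			}},
		}},
	}
	fmt.Println(spec.HostNetwork, spec.Containers[0].Ports[0].HostPort)
}
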
@@ -4434,6 +4625,7 @@ message PodSpec {
   // - spec.hostPID
   // - spec.hostIPC
   // - spec.hostUsers
+  // - spec.resources
   // - spec.securityContext.appArmorProfile
   // - spec.securityContext.seLinuxOptions
   // - spec.securityContext.seccompProfile
@@ -4504,7 +4696,7 @@ message PodSpec {
 
   // Resources is the total amount of CPU and Memory resources required by all
   // containers in the pod. It supports specifying Requests and Limits for
-  // "cpu" and "memory" resource names only. ResourceClaims are not supported.
+  // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
   //
   // This field enables fine-grained control over resource allocation for the
   // entire pod, allowing resource sharing among containers in a pod.
@@ -4516,6 +4708,21 @@ message PodSpec {
   // +featureGate=PodLevelResources
   // +optional
   optional ResourceRequirements resources = 40;
+
+  // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
+  // This field only specifies the pod's hostname and does not affect its DNS records.
+  // When this field is set to a non-empty string:
+  // - It takes precedence over the values set in `hostname` and `subdomain`.
+  // - The Pod's hostname will be set to this value.
+  // - `setHostnameAsFQDN` must be nil or set to false.
+  // - `hostNetwork` must be set to false.
+  //
+  // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
+  // Requires the HostnameOverride feature gate to be enabled.
+  //
+  // +featureGate=HostnameOverride
+  // +optional
+  optional string hostnameOverride = 41;
 }
 
 // PodStatus represents information about the status of a pod. Status may trail the actual
@@ -4674,6 +4881,11 @@ message PodStatus {
   // +featureGate=DynamicResourceAllocation
   // +optional
   repeated PodResourceClaimStatus resourceClaimStatuses = 15;
+
+  // Status of extended resource claim backed by DRA.
+  // +featureGate=DRAExtendedResource
+  // +optional
+  optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18;
 }
 
 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
@@ -5298,7 +5510,7 @@ message ResourceRequirements {
   // Claims lists the names of resources, defined in spec.resourceClaims,
   // that are used by this container.
   //
-  // This is an alpha field and requires enabling the
+  // This field depends on the
   // DynamicResourceAllocation feature gate.
   //
   // This field is immutable. It can only be set for containers.
@@ -6301,7 +6513,6 @@ message Taint {
   optional string effect = 3;
 
   // TimeAdded represents the time at which the taint was added.
-  // It is only written for NoExecute taints.
   // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
 }
@@ -6682,6 +6893,44 @@ message VolumeProjection {
   // +featureGate=ClusterTrustBundleProjection
   // +optional
   optional ClusterTrustBundleProjection clusterTrustBundle = 5;
+
+  // Projects an auto-rotating credential bundle (private key and certificate
+  // chain) that the pod can use either as a TLS client or server.
+  //
+  // Kubelet generates a private key and uses it to send a
+  // PodCertificateRequest to the named signer.  Once the signer approves the
+  // request and issues a certificate chain, Kubelet writes the key and
+  // certificate chain to the pod filesystem.  The pod does not start until
+  // certificates have been issued for each podCertificate projected volume
+  // source in its spec.
+  //
+  // Kubelet will begin trying to rotate the certificate at the time indicated
+  // by the signer using the PodCertificateRequest.Status.BeginRefreshAt
+  // timestamp.
+  //
+  // Kubelet can write a single file, indicated by the credentialBundlePath
+  // field, or separate files, indicated by the keyPath and
+  // certificateChainPath fields.
+  //
+  // The credential bundle is a single file in PEM format.  The first PEM
+  // entry is the private key (in PKCS#8 format), and the remaining PEM
+  // entries are the certificate chain issued by the signer (typically,
+  // signers will return their certificate chain in leaf-to-root order).
+  //
+  // Prefer using the credential bundle format, since your application code
+  // can read it atomically.  If you use keyPath and certificateChainPath,
+  // your application must make two separate file reads. If these coincide
+  // with a certificate rotation, it is possible that the private key and leaf
+  // certificate you read may not correspond to each other.  Your application
+  // will need to check for this condition, and re-read until they are
+  // consistent.
+  //
+  // The named signer chooses the format of the certificate it issues;
+  // consult the signer implementation's documentation to learn how to
+  // use the certificates it issues.
+  //
+  // +featureGate=PodCertificateProjection
+  // +optional
+  optional PodCertificateProjection podCertificate = 6;
 }
 
 // VolumeResourceRequirements describes the storage resource requirements for a volume.
@@ -6753,13 +7002,12 @@ message VolumeSource {
 
   // iscsi represents an ISCSI Disk resource that is attached to a
   // kubelet's host machine and then exposed to the pod.
-  // More info: https://examples.k8s.io/volumes/iscsi/README.md
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
   // +optional
   optional ISCSIVolumeSource iscsi = 8;
 
   // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
   // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
-  // More info: https://examples.k8s.io/volumes/glusterfs/README.md
   // +optional
   optional GlusterfsVolumeSource glusterfs = 9;
 
@@ -6771,7 +7019,6 @@ message VolumeSource {
 
   // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
   // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
-  // More info: https://examples.k8s.io/volumes/rbd/README.md
   // +optional
   optional RBDVolumeSource rbd = 11;
 
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index f7641e485..08b6d351c 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -91,12 +91,11 @@ type VolumeSource struct {
 	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
 	// iscsi represents an ISCSI Disk resource that is attached to a
 	// kubelet's host machine and then exposed to the pod.
-	// More info: https://examples.k8s.io/volumes/iscsi/README.md
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
 	// +optional
 	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
 	// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
 	// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
-	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
 	// +optional
 	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
 	// persistentVolumeClaimVolumeSource represents a reference to a
@@ -106,7 +105,6 @@ type VolumeSource struct {
 	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
 	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
 	// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
-	// More info: https://examples.k8s.io/volumes/rbd/README.md
 	// +optional
 	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
 	// flexVolume represents a generic volume resource that is
@@ -437,7 +435,6 @@ type PersistentVolumeSpec struct {
 	// after a volume has been updated successfully to a new class.
 	// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
 	// PersistentVolumeClaims during the binding process.
-	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
 	// +featureGate=VolumeAttributesClass
 	// +optional
 	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
@@ -616,15 +613,13 @@ type PersistentVolumeClaimSpec struct {
 	// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
 	// If specified, the CSI driver will create or update the volume with the attributes defined
 	// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
-	// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
-	// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
-	// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
-	// will be set by the persistentvolume controller if it exists.
+	// it can be changed after the claim is created. An empty string or nil value indicates that no
+	// VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
+	// this field can be reset to its previous value (including nil) to cancel the modification.
 	// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
 	// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
 	// exists.
 	// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
-	// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
 	// +featureGate=VolumeAttributesClass
 	// +optional
 	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
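A minimal sketch, using the vendored core/v1 types above, of how a controller or operator might point a claim at a different VolumeAttributesClass; the helper name is hypothetical, and passing the claim's previous value (including nil) is how a modification stuck in an Infeasible error state gets cancelled.

package examples

import corev1 "k8s.io/api/core/v1"

// setVolumeAttributesClass updates the desired VolumeAttributesClass of an
// existing claim. Resetting to the previous value, including nil, cancels a
// modification that is stuck in an Infeasible error state.
func setVolumeAttributesClass(pvc *corev1.PersistentVolumeClaim, className *string) {
	pvc.Spec.VolumeAttributesClassName = className
}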
@@ -851,13 +846,11 @@ type PersistentVolumeClaimStatus struct {
 	AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
 	// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
 	// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
-	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
 	// +featureGate=VolumeAttributesClass
 	// +optional
 	CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
 	// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
 	// When this is unset, there is no ModifyVolume operation being attempted.
-	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
 	// +featureGate=VolumeAttributesClass
 	// +optional
 	ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
@@ -972,7 +965,6 @@ type EmptyDirVolumeSource struct {
 // Glusterfs volumes do not support ownership management or SELinux relabeling.
 type GlusterfsVolumeSource struct {
 	// endpoints is the endpoint name that details Glusterfs topology.
-	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
 	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
 
 	// path is the Glusterfs volume path.
@@ -1993,6 +1985,79 @@ type ClusterTrustBundleProjection struct {
 	Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
 }
 
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
+type PodCertificateProjection struct {
+	// Kubelet's generated CSRs will be addressed to this signer.
+	//
+	// +required
+	SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"`
+
+	// The type of keypair Kubelet will generate for the pod.
+	//
+	// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+	// "ECDSAP521", and "ED25519".
+	//
+	// +required
+	KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"`
+
+	// maxExpirationSeconds is the maximum lifetime permitted for the
+	// certificate.
+	//
+	// Kubelet copies this value verbatim into the PodCertificateRequests it
+	// generates for this projection.
+	//
+	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+	// will reject values shorter than 3600 (1 hour).  The maximum allowable
+	// value is 7862400 (91 days).
+	//
+	// The signer implementation is then free to issue a certificate with any
+	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+	// seconds (1 hour).  This constraint is enforced by kube-apiserver.
+	// `kubernetes.io` signers will never issue certificates with a lifetime
+	// longer than 24 hours.
+	//
+	// +optional
+	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"`
+
+	// Write the credential bundle at this path in the projected volume.
+	//
+	// The credential bundle is a single file that contains multiple PEM blocks.
+	// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+	// key.
+	//
+	// The remaining blocks are CERTIFICATE blocks, containing the issued
+	// certificate chain from the signer (leaf and any intermediates).
+	//
+	// Using credentialBundlePath lets your Pod's application code make a single
+	// atomic read that retrieves a consistent key and certificate chain.  If you
+	// project them to separate files, your application code will need to
+	// additionally check that the leaf certificate was issued to the key.
+	//
+	// +optional
+	CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"`
+
+	// Write the key at this path in the projected volume.
+	//
+	// Most applications should use credentialBundlePath.  When using keyPath
+	// and certificateChainPath, your application needs to check that the key
+	// and leaf certificate are consistent, because it is possible to read the
+	// files mid-rotation.
+	//
+	// +optional
+	KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"`
+
+	// Write the certificate chain at this path in the projected volume.
+	//
+	// Most applications should use credentialBundlePath.  When using keyPath
+	// and certificateChainPath, your application needs to check that the key
+	// and leaf certificate are consistent, because it is possible to read the
+	// files mid-rotation.
+	//
+	// +optional
+	CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
+}
+
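A hedged sketch of the new pod certificate projection, assuming the PodCertificateProjection feature gate is enabled; the signer name, volume name, and bundle path are hypothetical. It writes a single PEM credential bundle so the application can read key and chain atomically.

package examples

import corev1 "k8s.io/api/core/v1"

// servingCertVolume builds a projected volume whose podCertificate source asks
// the kubelet for an auto-rotated credential bundle (PKCS#8 key + chain).
func servingCertVolume() corev1.Volume {
	maxExpiration := int32(86400) // 24 hours, matching the kube-apiserver default
	return corev1.Volume{
		Name: "serving-cert",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					PodCertificate: &corev1.PodCertificateProjection{
						SignerName:           "example.com/serving-signer",
						KeyType:              "ECDSAP256",
						MaxExpirationSeconds: &maxExpiration,
						CredentialBundlePath: "credentialbundle.pem",
					},
				}},
			},
		},
	}
}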
 // Represents a projected volume source
 type ProjectedVolumeSource struct {
 	// sources is the list of volume projections. Each entry in this list
@@ -2043,6 +2108,44 @@ type VolumeProjection struct {
 	// +featureGate=ClusterTrustBundleProjection
 	// +optional
 	ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
+
+	// Projects an auto-rotating credential bundle (private key and certificate
+	// chain) that the pod can use either as a TLS client or server.
+	//
+	// Kubelet generates a private key and uses it to send a
+	// PodCertificateRequest to the named signer.  Once the signer approves the
+	// request and issues a certificate chain, Kubelet writes the key and
+	// certificate chain to the pod filesystem.  The pod does not start until
+	// certificates have been issued for each podCertificate projected volume
+	// source in its spec.
+	//
+	// Kubelet will begin trying to rotate the certificate at the time indicated
+	// by the signer using the PodCertificateRequest.Status.BeginRefreshAt
+	// timestamp.
+	//
+	// Kubelet can write a single file, indicated by the credentialBundlePath
+	// field, or separate files, indicated by the keyPath and
+	// certificateChainPath fields.
+	//
+	// The credential bundle is a single file in PEM format.  The first PEM
+	// entry is the private key (in PKCS#8 format), and the remaining PEM
+	// entries are the certificate chain issued by the signer (typically,
+	// signers will return their certificate chain in leaf-to-root order).
+	//
+	// Prefer using the credential bundle format, since your application code
+	// can read it atomically.  If you use keyPath and certificateChainPath,
+	// your application must make two separate file reads. If these coincide
+	// with a certificate rotation, it is possible that the private key and leaf
+	// certificate you read may not correspond to each other.  Your application
+	// will need to check for this condition, and re-read until they are
+	// consistent.
+	//
+	// The named signer chooses the format of the certificate it issues;
+	// consult the signer implementation's documentation to learn how to
+	// use the certificates it issues.
+	//
+	// +featureGate=PodCertificateProjection
+	// +optional
+	PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
 }
 
 const (
@@ -2351,7 +2454,8 @@ type VolumeDevice struct {
 
 // EnvVar represents an environment variable present in a Container.
 type EnvVar struct {
-	// Name of the environment variable. Must be a C_IDENTIFIER.
+	// Name of the environment variable.
+	// May consist of any printable ASCII characters except '='.
 	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
 
 	// Optional: no more than one of the following may be specified.
@@ -2388,6 +2492,39 @@ type EnvVarSource struct {
 	// Selects a key of a secret in the pod's namespace
 	// +optional
 	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+	// FileKeyRef selects a key of the env file.
+	// Requires the EnvFiles feature gate to be enabled.
+	//
+	// +featureGate=EnvFiles
+	// +optional
+	FileKeyRef *FileKeySelector `json:"fileKeyRef,omitempty" protobuf:"bytes,5,opt,name=fileKeyRef"`
+}
+
+// FileKeySelector selects a key of the env file.
+// +structType=atomic
+type FileKeySelector struct {
+	// The name of the volume mount containing the env file.
+	// +required
+	VolumeName string `json:"volumeName" protobuf:"bytes,1,opt,name=volumeName"`
+	// The path within the volume from which to select the file.
+	// Must be relative and may not contain the '..' path or start with '..'.
+	// +required
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+	// The key within the env file. An invalid key will prevent the pod from starting.
+	// The keys defined within a source may consist of any printable ASCII characters except '='.
+	// During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+	// +required
+	Key string `json:"key" protobuf:"bytes,3,opt,name=key"`
+	// Specify whether the file or its key must be defined. If the file or key
+	// does not exist, then the env var is not published.
+	// If optional is set to true and the specified key does not exist,
+	// the environment variable will not be set in the Pod's containers.
+	//
+	// If optional is set to false and the specified key does not exist,
+	// an error will be returned during Pod creation.
+	// +optional
+	// +default=false
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
 }
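A minimal sketch of the FileKeySelector source above, assuming the EnvFiles feature gate is enabled and that a volume named "config" (hypothetical) already contains an app.env file with a db_password key.

package examples

import corev1 "k8s.io/api/core/v1"

// envVarFromFileKey sources DB_PASSWORD from the db_password key of app.env
// inside the "config" volume. Volume, path, and key names are hypothetical.
func envVarFromFileKey() corev1.EnvVar {
	optional := false // fail pod creation if the key is missing
	return corev1.EnvVar{
		Name: "DB_PASSWORD",
		ValueFrom: &corev1.EnvVarSource{
			FileKeyRef: &corev1.FileKeySelector{
				VolumeName: "config",
				Path:       "app.env",
				Key:        "db_password",
				Optional:   &optional,
			},
		},
	}
}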
 
 // ObjectFieldSelector selects an APIVersioned field of an object.
@@ -2439,7 +2576,8 @@ type SecretKeySelector struct {
 
 // EnvFromSource represents the source of a set of ConfigMaps or Secrets
 type EnvFromSource struct {
-	// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
+	// Optional text to prepend to the name of each environment variable.
+	// May consist of any printable ASCII characters except '='.
 	// +optional
 	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
 	// The ConfigMap to select from
@@ -2697,7 +2835,7 @@ type ResourceRequirements struct {
 	// Claims lists the names of resources, defined in spec.resourceClaims,
 	// that are used by this container.
 	//
-	// This is an alpha field and requires enabling the
+	// This field depends on the
 	// DynamicResourceAllocation feature gate.
 	//
 	// This field is immutable. It can only be set for containers.
@@ -2805,8 +2943,8 @@ type Container struct {
 	// +listMapKey=protocol
 	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
 	// List of sources to populate environment variables in the container.
-	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
-	// will be reported as an event when the container is starting. When a key exists in multiple
+	// The keys defined within a source may consist of any printable ASCII characters except '='.
+	// When a key exists in multiple
 	// sources, the value associated with the last source will take precedence.
 	// Values defined by an Env with a duplicate key will take precedence.
 	// Cannot be updated.
@@ -2832,10 +2970,10 @@ type Container struct {
 	// +listType=atomic
 	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
 	// RestartPolicy defines the restart behavior of individual containers in a pod.
-	// This field may only be set for init containers, and the only allowed value is "Always".
-	// For non-init containers or when this field is not specified,
+	// This overrides the pod-level restart policy. When this field is not specified,
 	// the restart behavior is defined by the Pod's restart policy and the container type.
-	// Setting the RestartPolicy as "Always" for the init container will have the following effect:
+	// Additionally, setting the RestartPolicy as "Always" for the init container will
+	// have the following effect:
 	// this init container will be continually restarted on
 	// exit until all regular containers have terminated. Once all regular
 	// containers have completed, all init containers with restartPolicy "Always"
@@ -2849,6 +2987,21 @@ type Container struct {
 	// +featureGate=SidecarContainers
 	// +optional
 	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
+	// Represents a list of rules to be checked to determine if the
+	// container should be restarted on exit. The rules are evaluated in
+	// order. Once a rule matches a container exit condition, the remaining
+	// rules are ignored. If no rule matches the container exit condition,
+	// the Container-level restart policy determines whether the container
+	// is restarted or not. Constraints on the rules:
+	// - At most 20 rules are allowed.
+	// - Rules can have the same action.
+	// - Identical rules are not forbidden in validations.
+	// When rules are specified, container MUST set RestartPolicy explicitly
+	// even if it matches the Pod's RestartPolicy.
+	// +featureGate=ContainerRestartRules
+	// +optional
+	// +listType=atomic
+	RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
 	// Pod volumes to mount into the container's filesystem.
 	// Cannot be updated.
 	// +optional
@@ -3478,11 +3631,64 @@ const (
 )
 
 // ContainerRestartPolicy is the restart policy for a single container.
-// This may only be set for init containers and only allowed value is "Always".
+// The only allowed values are "Always", "Never", and "OnFailure".
 type ContainerRestartPolicy string
 
 const (
-	ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
+	ContainerRestartPolicyAlways    ContainerRestartPolicy = "Always"
+	ContainerRestartPolicyNever     ContainerRestartPolicy = "Never"
+	ContainerRestartPolicyOnFailure ContainerRestartPolicy = "OnFailure"
+)
+
+// ContainerRestartRule describes how a container exit is handled.
+type ContainerRestartRule struct {
+	// Specifies the action taken on a container exit if the requirements
+	// are satisfied. The only possible value is "Restart" to restart the
+	// container.
+	// +required
+	Action ContainerRestartRuleAction `json:"action,omitempty" proto:"bytes,1,opt,name=action" protobuf:"bytes,1,opt,name=action,casttype=ContainerRestartRuleAction"`
+
+	// Represents the exit codes to check on container exits.
+	// +optional
+	// +oneOf=when
+	ExitCodes *ContainerRestartRuleOnExitCodes `json:"exitCodes,omitempty" proto:"bytes,2,opt,name=exitCodes" protobuf:"bytes,2,opt,name=exitCodes"`
+}
+
+// ContainerRestartRuleAction describes the action to take when the
+// container exits.
+type ContainerRestartRuleAction string
+
+// The only valid action is Restart.
+const (
+	ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
+)
+
+// ContainerRestartRuleOnExitCodes describes the condition
+// for handling an exited container based on its exit codes.
+type ContainerRestartRuleOnExitCodes struct {
+	// Represents the relationship between the container exit code(s) and the
+	// specified values. Possible values are:
+	// - In: the requirement is satisfied if the container exit code is in the
+	//   set of specified values.
+	// - NotIn: the requirement is satisfied if the container exit code is
+	//   not in the set of specified values.
+	// +required
+	Operator ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty" proto:"bytes,1,opt,name=operator" protobuf:"bytes,1,opt,name=operator,casttype=ContainerRestartRuleOnExitCodesOperator"`
+
+	// Specifies the set of values to check for container exit codes.
+	// At most 255 elements are allowed.
+	// +optional
+	// +listType=set
+	Values []int32 `json:"values,omitempty" proto:"varint,2,rep,name=values" protobuf:"varint,2,rep,name=values"`
+}
+
+// ContainerRestartRuleOnExitCodesOperator describes the operator
+// to take for the exit codes.
+type ContainerRestartRuleOnExitCodesOperator string
+
+const (
+	ContainerRestartRuleOnExitCodesOpIn    ContainerRestartRuleOnExitCodesOperator = "In"
+	ContainerRestartRuleOnExitCodesOpNotIn ContainerRestartRuleOnExitCodesOperator = "NotIn"
 )
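A short sketch of the container-level restart rules defined above, assuming the ContainerRestartRules feature gate is enabled; the container name, image, and exit codes are hypothetical. RestartPolicy is set explicitly because rules are present, even though it matches a typical pod-level policy.

package examples

import corev1 "k8s.io/api/core/v1"

// workerContainer exits permanently on most failures but is restarted in
// place when it exits with one of the listed transient exit codes.
func workerContainer() corev1.Container {
	never := corev1.ContainerRestartPolicyNever
	return corev1.Container{
		Name:  "worker",
		Image: "registry.example/worker:1.0",
		// Must be set explicitly whenever restartPolicyRules are specified.
		RestartPolicy: &never,
		RestartPolicyRules: []corev1.ContainerRestartRule{{
			Action: corev1.ContainerRestartRuleActionRestart,
			ExitCodes: &corev1.ContainerRestartRuleOnExitCodes{
				Operator: corev1.ContainerRestartRuleOnExitCodesOpIn,
				Values:   []int32{70, 75},
			},
		}},
	}
}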
 
 // DNSPolicy defines how a pod's DNS will be configured.
@@ -3678,8 +3884,8 @@ type PodAntiAffinity struct {
 	// most preferred is the one with the greatest sum of weights, i.e.
 	// for each node that meets all of the scheduling requirements (resource
 	// request, requiredDuringScheduling anti-affinity expressions, etc.),
-	// compute a sum by iterating through the elements of this field and adding
-	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+	// compute a sum by iterating through the elements of this field and subtracting
+	// "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
 	// node(s) with the highest sum are the most preferred.
 	// +optional
 	// +listType=atomic
@@ -3806,7 +4012,6 @@ type Taint struct {
 	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
 	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
 	// TimeAdded represents the time at which the taint was added.
-	// It is only written for NoExecute taints.
 	// +optional
 	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
 }
@@ -3983,7 +4188,9 @@ type PodSpec struct {
 	// +optional
 	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
 	// Host networking requested for this pod. Use the host's network namespace.
-	// If this option is set, the ports that will be used must be specified.
+	// When using HostNetwork you should specify ports so the scheduler is aware.
+	// When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
+	// and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
 	// Default to false.
 	// +k8s:conversion-gen=false
 	// +optional
@@ -4126,6 +4333,7 @@ type PodSpec struct {
 	// - spec.hostPID
 	// - spec.hostIPC
 	// - spec.hostUsers
+	// - spec.resources
 	// - spec.securityContext.appArmorProfile
 	// - spec.securityContext.seLinuxOptions
 	// - spec.securityContext.seccompProfile
@@ -4194,7 +4402,7 @@ type PodSpec struct {
 	ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
 	// Resources is the total amount of CPU and Memory resources required by all
 	// containers in the pod. It supports specifying Requests and Limits for
-	// "cpu" and "memory" resource names only. ResourceClaims are not supported.
+	// "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
 	//
 	// This field enables fine-grained control over resource allocation for the
 	// entire pod, allowing resource sharing among containers in a pod.
@@ -4206,6 +4414,20 @@ type PodSpec struct {
 	// +featureGate=PodLevelResources
 	// +optional
 	Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
+	// HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
+	// This field only specifies the pod's hostname and does not affect its DNS records.
+	// When this field is set to a non-empty string:
+	// - It takes precedence over the values set in `hostname` and `subdomain`.
+	// - The Pod's hostname will be set to this value.
+	// - `setHostnameAsFQDN` must be nil or set to false.
+	// - `hostNetwork` must be set to false.
+	//
+	// This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
+	// Requires the HostnameOverride feature gate to be enabled.
+	//
+	// +featureGate=HostnameOverride
+	// +optional
+	HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"`
 }
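A hedged sketch combining the two PodSpec additions above, pod-level resources and hostnameOverride, assuming the PodLevelResources and HostnameOverride feature gates are enabled; the container name, image, and override value are hypothetical.

package examples

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podSpecWithPodLevelResources requests cpu/memory for the pod as a whole and
// overrides the hostname seen inside the pod. hostNetwork must stay false and
// setHostnameAsFQDN must stay unset/false for the override to be accepted.
func podSpecWithPodLevelResources() corev1.PodSpec {
	hostname := "legacy-node-01" // hypothetical; an RFC 1123 subdomain of at most 64 characters
	return corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example/app:1.0",
		}},
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("500m"),
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("1"),
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
		HostnameOverride: &hostname,
	}
}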
 
 // PodResourceClaim references exactly one ResourceClaim, either directly
@@ -4267,6 +4489,31 @@ type PodResourceClaimStatus struct {
 	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"`
 }
 
+// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
+// resource requests backed by DRA. It stores the generated name for
+// the corresponding special ResourceClaim created by the scheduler.
+type PodExtendedResourceClaimStatus struct {
+	// RequestMappings identifies the mapping of <container, extended resource backed by DRA>
+	// to the device request in the generated ResourceClaim.
+	// +listType=atomic
+	RequestMappings []ContainerExtendedResourceRequest `json:"requestMappings" protobuf:"bytes,1,rep,name=requestMappings"`
+
+	// ResourceClaimName is the name of the ResourceClaim that was
+	// generated for the Pod in the namespace of the Pod.
+	ResourceClaimName string `json:"resourceClaimName" protobuf:"bytes,2,name=resourceClaimName"`
+}
+
+// ContainerExtendedResourceRequest has the mapping of container name,
+// extended resource name to the device request name.
+type ContainerExtendedResourceRequest struct {
+	// The name of the container requesting resources.
+	ContainerName string `json:"containerName" protobuf:"bytes,1,name=containerName"`
+	// The name of the extended resource in that container which gets backed by DRA.
+	ResourceName string `json:"resourceName" protobuf:"bytes,2,name=resourceName"`
+	// The name of the request in the special ResourceClaim which corresponds to the extended resource.
+	RequestName string `json:"requestName" protobuf:"bytes,3,name=requestName"`
+}
+
 // OSName is the set of OS'es that can be used in OS.
 type OSName string
 
@@ -4799,8 +5046,8 @@ type EphemeralContainerCommon struct {
 	// +listMapKey=protocol
 	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
 	// List of sources to populate environment variables in the container.
-	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
-	// will be reported as an event when the container is starting. When a key exists in multiple
+	// The keys defined within a source may consist of any printable ASCII characters except '='.
+	// When a key exists in multiple
 	// sources, the value associated with the last source will take precedence.
 	// Values defined by an Env with a duplicate key will take precedence.
 	// Cannot be updated.
@@ -4826,11 +5073,17 @@ type EphemeralContainerCommon struct {
 	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
 	// Restart policy for the container to manage the restart behavior of each
 	// container within a pod.
-	// This may only be set for init containers. You cannot set this field on
-	// ephemeral containers.
+	// You cannot set this field on ephemeral containers.
 	// +featureGate=SidecarContainers
 	// +optional
 	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
+	// Represents a list of rules to be checked to determine if the
+	// container should be restarted on exit. You cannot set this field on
+	// ephemeral containers.
+	// +featureGate=ContainerRestartRules
+	// +optional
+	// +listType=atomic
+	RestartPolicyRules []ContainerRestartRule `json:"restartPolicyRules,omitempty" protobuf:"bytes,25,rep,name=restartPolicyRules"`
 	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
 	// Cannot be updated.
 	// +optional
@@ -5091,6 +5344,10 @@ type PodStatus struct {
 	// +featureGate=DynamicResourceAllocation
 	// +optional
 	ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"`
+	// Status of extended resource claim backed by DRA.
+	// +featureGate=DRAExtendedResource
+	// +optional
+	ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"`
 }
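A small sketch of how a client might read the DRAExtendedResource status surfaced on a pod through the types above; the printing is illustrative only.

package examples

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// printExtendedResourceMappings dumps the scheduler-generated ResourceClaim
// name and the container/resource -> request mapping, if the status is set.
func printExtendedResourceMappings(pod *corev1.Pod) {
	status := pod.Status.ExtendedResourceClaimStatus
	if status == nil {
		return // feature gate disabled or no DRA-backed extended resources
	}
	fmt.Printf("generated ResourceClaim: %s\n", status.ResourceClaimName)
	for _, m := range status.RequestMappings {
		fmt.Printf("container %q: %s -> request %q\n", m.ContainerName, m.ResourceName, m.RequestName)
	}
}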
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -5311,6 +5568,7 @@ type ReplicationControllerCondition struct {
 // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:prerelease-lifecycle-gen:introduced=1.0
+// +k8s:supportsSubresource=/scale
 
 // ReplicationController represents the configuration of a replication controller.
 type ReplicationController struct {
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 9e987eefd..120430766 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -356,11 +356,12 @@ var map_Container = map[string]string{
 	"args":                     "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
 	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
 	"ports":                    "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
-	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
 	"env":                      "List of environment variables to set in the container. Cannot be updated.",
 	"resources":                "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
 	"resizePolicy":             "Resources resize policy for the container.",
-	"restartPolicy":            "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+	"restartPolicy":            "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+	"restartPolicyRules":       "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy.",
 	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
 	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
 	"livenessProbe":            "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
@@ -380,6 +381,17 @@ func (Container) SwaggerDoc() map[string]string {
 	return map_Container
 }
 
+var map_ContainerExtendedResourceRequest = map[string]string{
+	"":              "ContainerExtendedResourceRequest has the mapping of container name, extended resource name to the device request name.",
+	"containerName": "The name of the container requesting resources.",
+	"resourceName":  "The name of the extended resource in that container which gets backed by DRA.",
+	"requestName":   "The name of the request in the special ResourceClaim which corresponds to the extended resource.",
+}
+
+func (ContainerExtendedResourceRequest) SwaggerDoc() map[string]string {
+	return map_ContainerExtendedResourceRequest
+}
+
 var map_ContainerImage = map[string]string{
 	"":          "Describe a container image",
 	"names":     "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]",
@@ -413,6 +425,26 @@ func (ContainerResizePolicy) SwaggerDoc() map[string]string {
 	return map_ContainerResizePolicy
 }
 
+var map_ContainerRestartRule = map[string]string{
+	"":          "ContainerRestartRule describes how a container exit is handled.",
+	"action":    "Specifies the action taken on a container exit if the requirements are satisfied. The only possible value is \"Restart\" to restart the container.",
+	"exitCodes": "Represents the exit codes to check on container exits.",
+}
+
+func (ContainerRestartRule) SwaggerDoc() map[string]string {
+	return map_ContainerRestartRule
+}
+
+var map_ContainerRestartRuleOnExitCodes = map[string]string{
+	"":         "ContainerRestartRuleOnExitCodes describes the condition for handling an exited container based on its exit codes.",
+	"operator": "Represents the relationship between the container exit code(s) and the specified values. Possible values are: - In: the requirement is satisfied if the container exit code is in the\n  set of specified values.\n- NotIn: the requirement is satisfied if the container exit code is\n  not in the set of specified values.",
+	"values":   "Specifies the set of values to check for container exit codes. At most 255 elements are allowed.",
+}
+
+func (ContainerRestartRuleOnExitCodes) SwaggerDoc() map[string]string {
+	return map_ContainerRestartRuleOnExitCodes
+}
+
 var map_ContainerState = map[string]string{
 	"":           "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
 	"waiting":    "Details about a waiting container",
@@ -597,7 +629,7 @@ func (EndpointsList) SwaggerDoc() map[string]string {
 
 var map_EnvFromSource = map[string]string{
 	"":             "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
-	"prefix":       "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.",
+	"prefix":       "Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='.",
 	"configMapRef": "The ConfigMap to select from",
 	"secretRef":    "The Secret to select from",
 }
@@ -608,7 +640,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string {
 
 var map_EnvVar = map[string]string{
 	"":          "EnvVar represents an environment variable present in a Container.",
-	"name":      "Name of the environment variable. Must be a C_IDENTIFIER.",
+	"name":      "Name of the environment variable. May consist of any printable ASCII characters except '='.",
 	"value":     "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
 	"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
 }
@@ -623,6 +655,7 @@ var map_EnvVarSource = map[string]string{
 	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
 	"configMapKeyRef":  "Selects a key of a ConfigMap.",
 	"secretKeyRef":     "Selects a key of a secret in the pod's namespace",
+	"fileKeyRef":       "FileKeyRef selects a key of the env file. Requires the EnvFiles feature gate to be enabled.",
 }
 
 func (EnvVarSource) SwaggerDoc() map[string]string {
@@ -646,11 +679,12 @@ var map_EphemeralContainerCommon = map[string]string{
 	"args":                     "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
 	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
 	"ports":                    "Ports are not allowed for ephemeral containers.",
-	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
 	"env":                      "List of environment variables to set in the container. Cannot be updated.",
 	"resources":                "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
 	"resizePolicy":             "Resources resize policy for the container.",
-	"restartPolicy":            "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.",
+	"restartPolicy":            "Restart policy for the container to manage the restart behavior of each container within a pod. You cannot set this field on ephemeral containers.",
+	"restartPolicyRules":       "Represents a list of rules to be checked to determine if the container should be restarted on exit. You cannot set this field on ephemeral containers.",
 	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.",
 	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
 	"livenessProbe":            "Probes are not allowed for ephemeral containers.",
@@ -754,6 +788,18 @@ func (FCVolumeSource) SwaggerDoc() map[string]string {
 	return map_FCVolumeSource
 }
 
+var map_FileKeySelector = map[string]string{
+	"":           "FileKeySelector selects a key of the env file.",
+	"volumeName": "The name of the volume mount containing the env file.",
+	"path":       "The path within the volume from which to select the file. Must be relative and may not contain the '..' path or start with '..'.",
+	"key":        "The key within the env file. An invalid key will prevent the pod from starting. The keys defined within a source may consist of any printable ASCII characters except '='. During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.",
+	"optional":   "Specify whether the file or its key must be defined. If the file or key does not exist, then the env var is not published. If optional is set to true and the specified key does not exist, the environment variable will not be set in the Pod's containers.\n\nIf optional is set to false and the specified key does not exist, an error will be returned during Pod creation.",
+}
+
+func (FileKeySelector) SwaggerDoc() map[string]string {
+	return map_FileKeySelector
+}
+
 var map_FlexPersistentVolumeSource = map[string]string{
 	"":          "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
 	"driver":    "driver is the name of the driver to use for this volume.",
@@ -837,7 +883,7 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
 
 var map_GlusterfsVolumeSource = map[string]string{
 	"":          "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
-	"endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"endpoints": "endpoints is the endpoint name that details Glusterfs topology.",
 	"path":      "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
 	"readOnly":  "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
 }
@@ -1446,7 +1492,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
 	"volumeMode":                "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
 	"dataSource":                "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
 	"dataSourceRef":             "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n  allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n  preserves all values, and generates an error if a disallowed value is\n  specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n  in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
-	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
+	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string or nil value indicates that no VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/",
 }
 
 func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@@ -1461,8 +1507,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
 	"conditions":                       "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
 	"allocatedResources":               "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
 	"allocatedResourceStatuses":        "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
-	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
-	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim",
+	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
 }
 
 func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
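A minimal sketch (not part of the vendored diff) of how a consumer could interpret the allocatedResourceStatuses states documented above, assuming the vendored k8s.io/api/core/v1 types; the helper name describeResize is illustrative only.

```go
// Illustrative sketch: interpreting the PVC resize states documented above.
// The literal state strings follow the field documentation.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func describeResize(pvc *v1.PersistentVolumeClaim) string {
	status, ok := pvc.Status.AllocatedResourceStatuses[v1.ResourceStorage]
	if !ok {
		// Per the docs, an unset entry means no resize is in progress.
		return "no resize operation in progress"
	}
	switch string(status) {
	case "ControllerResizeInProgress", "NodeResizePending", "NodeResizeInProgress":
		return fmt.Sprintf("resize ongoing: %s", status)
	case "ControllerResizeFailed", "NodeResizeFailed":
		return fmt.Sprintf("resize hit a terminal error: %s", status)
	default:
		// Unknown statuses should be ignored by controllers that do not own them.
		return fmt.Sprintf("unrecognized status %q, ignoring", string(status))
	}
}

func main() {
	fmt.Println(describeResize(&v1.PersistentVolumeClaim{}))
}
```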
@@ -1539,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{
 	"mountOptions":                  "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
 	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
 	"nodeAffinity":                  "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
-	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.",
 }
 
 func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
@@ -1606,7 +1652,7 @@ func (PodAffinityTerm) SwaggerDoc() map[string]string {
 var map_PodAntiAffinity = map[string]string{
 	"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
 	"requiredDuringSchedulingIgnoredDuringExecution":  "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
-	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and subtracting \"weight\" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
 }
 
 func (PodAntiAffinity) SwaggerDoc() map[string]string {
@@ -1626,6 +1672,20 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
 	return map_PodAttachOptions
 }
 
+var map_PodCertificateProjection = map[string]string{
+	"":                     "PodCertificateProjection provides a private key and X.509 certificate in the pod filesystem.",
+	"signerName":           "Kubelet's generated CSRs will be addressed to this signer.",
+	"keyType":              "The type of keypair Kubelet will generate for the pod.\n\nValid values are \"RSA3072\", \"RSA4096\", \"ECDSAP256\", \"ECDSAP384\", \"ECDSAP521\", and \"ED25519\".",
+	"maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nKubelet copies this value verbatim into the PodCertificateRequests it generates for this projection.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour).  The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour).  This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+	"credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain.  If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
+	"keyPath":              "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath.  When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+	"certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath.  When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+}
+
+func (PodCertificateProjection) SwaggerDoc() map[string]string {
+	return map_PodCertificateProjection
+}
+
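The credentialBundlePath docs above describe a single-file read as the safe consumption pattern. A minimal sketch of reading such a bundle with the standard library; the file path below is hypothetical and stands in for wherever credentialBundlePath lands inside the projected volume.

```go
// Illustrative sketch: reading a projected credential bundle as described
// above. One read yields a consistent private key and certificate chain,
// because both live in the same file.
package main

import (
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("/var/run/secrets/pod-certificate/credentialbundle.pem") // hypothetical path
	if err != nil {
		panic(err)
	}

	var keyDER []byte
	var chainDER [][]byte
	for {
		var block *pem.Block
		block, data = pem.Decode(data)
		if block == nil {
			break
		}
		switch block.Type {
		case "PRIVATE KEY": // first block: PKCS#8 private key, per the docs
			keyDER = block.Bytes
		case "CERTIFICATE": // remaining blocks: leaf then intermediates
			chainDER = append(chainDER, block.Bytes)
		}
	}
	fmt.Printf("key bytes: %d, certificates in chain: %d\n", len(keyDER), len(chainDER))
}
```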
 var map_PodCondition = map[string]string{
 	"":                   "PodCondition contains details for the current condition of this pod.",
 	"type":               "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
@@ -1676,6 +1736,16 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
 	return map_PodExecOptions
 }
 
+var map_PodExtendedResourceClaimStatus = map[string]string{
+	"":                  "PodExtendedResourceClaimStatus is stored in the PodStatus for the extended resource requests backed by DRA. It stores the generated name for the corresponding special ResourceClaim created by the scheduler.",
+	"requestMappings":   "RequestMappings identifies the mapping of  to  device request in the generated ResourceClaim.",
+	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod.",
+}
+
+func (PodExtendedResourceClaimStatus) SwaggerDoc() map[string]string {
+	return map_PodExtendedResourceClaimStatus
+}
+
 var map_PodIP = map[string]string{
 	"":   "PodIP represents a single IP address allocated to the pod.",
 	"ip": "IP is the IP address assigned to the pod",
@@ -1824,7 +1894,7 @@ var map_PodSpec = map[string]string{
 	"serviceAccount":                "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
 	"automountServiceAccountToken":  "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
 	"nodeName":                      "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
-	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. When using HostNetwork you should specify ports so the scheduler is aware. When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`, and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`. Default to false.",
 	"hostPID":                       "Use the host's pid namespace. Optional: Default to false.",
 	"hostIPC":                       "Use the host's ipc namespace. Optional: Default to false.",
 	"shareProcessNamespace":         "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
@@ -1846,11 +1916,12 @@ var map_PodSpec = map[string]string{
 	"overhead":                      "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
 	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
 	"setHostnameAsFQDN":             "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
-	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
+	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
 	"hostUsers":                     "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
 	"schedulingGates":               "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
 	"resourceClaims":                "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
-	"resources":                     "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
+	"resources":                     "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
+	"hostnameOverride":              "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
 }
 
 func (PodSpec) SwaggerDoc() map[string]string {
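A minimal sketch of the pod-level resources field described above, assuming a cluster with the PodLevelResources feature gate enabled; the container image name is hypothetical.

```go
// Illustrative sketch: a PodSpec using the pod-level "resources" budget
// documented above. Only "cpu", "memory" and hugepages resource names are
// accepted at this level; the total is shared by all containers in the pod.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	spec := v1.PodSpec{
		Containers: []v1.Container{
			{Name: "app", Image: "registry.example.com/app:latest"}, // hypothetical image
		},
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("500m"),
				v1.ResourceMemory: resource.MustParse("256Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1"),
				v1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}
	fmt.Println(spec.Resources.Requests.Cpu().String())
}
```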
@@ -1858,24 +1929,25 @@ func (PodSpec) SwaggerDoc() map[string]string {
 }
 
 var map_PodStatus = map[string]string{
-	"":                           "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
-	"observedGeneration":         "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
-	"phase":                      "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
-	"conditions":                 "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
-	"message":                    "A human readable message indicating details about why the pod is in this condition.",
-	"reason":                     "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
-	"nominatedNodeName":          "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
-	"hostIP":                     "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
-	"hostIPs":                    "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
-	"podIP":                      "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
-	"podIPs":                     "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
-	"startTime":                  "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
-	"initContainerStatuses":      "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
-	"containerStatuses":          "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
-	"qosClass":                   "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
-	"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
-	"resize":                     "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
-	"resourceClaimStatuses":      "Status of resource claims.",
+	"":                            "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+	"observedGeneration":          "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
+	"phase":                       "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+	"conditions":                  "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"message":                     "A human readable message indicating details about why the pod is in this condition.",
+	"reason":                      "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
+	"nominatedNodeName":           "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
+	"hostIP":                      "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
+	"hostIPs":                     "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
+	"podIP":                       "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+	"podIPs":                      "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
+	"startTime":                   "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
+	"initContainerStatuses":       "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
+	"containerStatuses":           "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"qosClass":                    "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
+	"ephemeralContainerStatuses":  "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"resize":                      "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
+	"resourceClaimStatuses":       "Status of resource claims.",
+	"extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.",
 }
 
 func (PodStatus) SwaggerDoc() map[string]string {
@@ -2205,7 +2277,7 @@ var map_ResourceRequirements = map[string]string{
 	"":         "ResourceRequirements describes the compute resource requirements.",
 	"limits":   "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
 	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
-	"claims":   "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
+	"claims":   "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis field depends on the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
 }
 
 func (ResourceRequirements) SwaggerDoc() map[string]string {
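A minimal sketch of the claims wiring described above, assuming the DynamicResourceAllocation feature gate; the claim names ("gpu", "gpu-claim") and image are hypothetical.

```go
// Illustrative sketch: a container referencing a pod-level ResourceClaim by
// name through resources.claims, as documented above.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	claimName := "gpu-claim" // hypothetical ResourceClaim object name

	spec := v1.PodSpec{
		// Pod-level declaration of the claim...
		ResourceClaims: []v1.PodResourceClaim{
			{Name: "gpu", ResourceClaimName: &claimName},
		},
		Containers: []v1.Container{
			{
				Name:  "worker",
				Image: "registry.example.com/worker:latest", // hypothetical image
				Resources: v1.ResourceRequirements{
					// ...and the per-container reference by the same name.
					Claims: []v1.ResourceClaim{{Name: "gpu"}},
				},
			},
		},
	}
	fmt.Println(spec.Containers[0].Resources.Claims[0].Name)
}
```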
@@ -2587,7 +2659,7 @@ var map_Taint = map[string]string{
 	"key":       "Required. The taint key to be applied to a node.",
 	"value":     "The taint value corresponding to the taint key.",
 	"effect":    "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
-	"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
+	"timeAdded": "TimeAdded represents the time at which the taint was added.",
 }
 
 func (Taint) SwaggerDoc() map[string]string {
@@ -2727,6 +2799,7 @@ var map_VolumeProjection = map[string]string{
 	"configMap":           "configMap information about the configMap data to project",
 	"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
 	"clusterTrustBundle":  "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem.  Esoteric PEM features such as inter-block comments and block headers are stripped.  Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
+	"podCertificate":      "Projects an auto-rotating credential bundle (private key and certificate chain) that the pod can use either as a TLS client or server.\n\nKubelet generates a private key and uses it to send a PodCertificateRequest to the named signer.  Once the signer approves the request and issues a certificate chain, Kubelet writes the key and certificate chain to the pod filesystem.  The pod does not start until certificates have been issued for each podCertificate projected volume source in its spec.\n\nKubelet will begin trying to rotate the certificate at the time indicated by the signer using the PodCertificateRequest.Status.BeginRefreshAt timestamp.\n\nKubelet can write a single file, indicated by the credentialBundlePath field, or separate files, indicated by the keyPath and certificateChainPath fields.\n\nThe credential bundle is a single file in PEM format.  The first PEM entry is the private key (in PKCS#8 format), and the remaining PEM entries are the certificate chain issued by the signer (typically, signers will return their certificate chain in leaf-to-root order).\n\nPrefer using the credential bundle format, since your application code can read it atomically.  If you use keyPath and certificateChainPath, your application must make two separate file reads. If these coincide with a certificate rotation, it is possible that the private key and leaf certificate you read may not correspond to each other.  Your application will need to check for this condition, and re-read until they are consistent.\n\nThe named signer controls chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to use the certificates it issues.",
 }
 
 func (VolumeProjection) SwaggerDoc() map[string]string {
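A minimal sketch of declaring the new podCertificate projection described above; field names are inferred from the vendored core/v1 types added in this patch, and the signer name and bundle path are hypothetical.

```go
// Illustrative sketch: a projected volume using the podCertificate source
// documented above, with a single-file credential bundle so the application
// can read key and chain atomically.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	maxExpiration := int32(86400) // 24h, the default applied by kube-apiserver

	vol := v1.Volume{
		Name: "pod-certs",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{
					{
						PodCertificate: &v1.PodCertificateProjection{
							SignerName:           "example.com/my-signer", // hypothetical signer
							KeyType:              "ECDSAP256",
							MaxExpirationSeconds: &maxExpiration,
							CredentialBundlePath: "credentialbundle.pem", // hypothetical file name
						},
					},
				},
			},
		},
	}
	fmt.Println(vol.Name)
}
```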
@@ -2752,10 +2825,10 @@ var map_VolumeSource = map[string]string{
 	"gitRepo":               "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
 	"secret":                "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
 	"nfs":                   "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
-	"iscsi":                 "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
-	"glusterfs":             "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+	"iscsi":                 "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi",
+	"glusterfs":             "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.",
 	"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
-	"rbd":                   "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
+	"rbd":                   "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.",
 	"flexVolume":            "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
 	"cinder":                "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
 	"cephfs":                "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index 619c52542..bcd91bd01 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -829,6 +829,13 @@ func (in *Container) DeepCopyInto(out *Container) {
 		*out = new(ContainerRestartPolicy)
 		**out = **in
 	}
+	if in.RestartPolicyRules != nil {
+		in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
+		*out = make([]ContainerRestartRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.VolumeMounts != nil {
 		in, out := &in.VolumeMounts, &out.VolumeMounts
 		*out = make([]VolumeMount, len(*in))
@@ -879,6 +886,22 @@ func (in *Container) DeepCopy() *Container {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerExtendedResourceRequest) DeepCopyInto(out *ContainerExtendedResourceRequest) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerExtendedResourceRequest.
+func (in *ContainerExtendedResourceRequest) DeepCopy() *ContainerExtendedResourceRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerExtendedResourceRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
 	*out = *in
@@ -932,6 +955,48 @@ func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRestartRule) DeepCopyInto(out *ContainerRestartRule) {
+	*out = *in
+	if in.ExitCodes != nil {
+		in, out := &in.ExitCodes, &out.ExitCodes
+		*out = new(ContainerRestartRuleOnExitCodes)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRule.
+func (in *ContainerRestartRule) DeepCopy() *ContainerRestartRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerRestartRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerRestartRuleOnExitCodes) DeepCopyInto(out *ContainerRestartRuleOnExitCodes) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerRestartRuleOnExitCodes.
+func (in *ContainerRestartRuleOnExitCodes) DeepCopy() *ContainerRestartRuleOnExitCodes {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerRestartRuleOnExitCodes)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ContainerState) DeepCopyInto(out *ContainerState) {
 	*out = *in
@@ -1433,6 +1498,11 @@ func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
 		*out = new(SecretKeySelector)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.FileKeyRef != nil {
+		in, out := &in.FileKeyRef, &out.FileKeyRef
+		*out = new(FileKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -1506,6 +1576,13 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
 		*out = new(ContainerRestartPolicy)
 		**out = **in
 	}
+	if in.RestartPolicyRules != nil {
+		in, out := &in.RestartPolicyRules, &out.RestartPolicyRules
+		*out = make([]ContainerRestartRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.VolumeMounts != nil {
 		in, out := &in.VolumeMounts, &out.VolumeMounts
 		*out = make([]VolumeMount, len(*in))
@@ -1736,6 +1813,27 @@ func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileKeySelector) DeepCopyInto(out *FileKeySelector) {
+	*out = *in
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileKeySelector.
+func (in *FileKeySelector) DeepCopy() *FileKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(FileKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
 	*out = *in
@@ -3797,6 +3895,27 @@ func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection) {
+	*out = *in
+	if in.MaxExpirationSeconds != nil {
+		in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateProjection.
+func (in *PodCertificateProjection) DeepCopy() *PodCertificateProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCertificateProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodCondition) DeepCopyInto(out *PodCondition) {
 	*out = *in
@@ -3899,6 +4018,27 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExtendedResourceClaimStatus) DeepCopyInto(out *PodExtendedResourceClaimStatus) {
+	*out = *in
+	if in.RequestMappings != nil {
+		in, out := &in.RequestMappings, &out.RequestMappings
+		*out = make([]ContainerExtendedResourceRequest, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExtendedResourceClaimStatus.
+func (in *PodExtendedResourceClaimStatus) DeepCopy() *PodExtendedResourceClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExtendedResourceClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PodIP) DeepCopyInto(out *PodIP) {
 	*out = *in
@@ -4412,6 +4552,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
 		*out = new(ResourceRequirements)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.HostnameOverride != nil {
+		in, out := &in.HostnameOverride, &out.HostnameOverride
+		*out = new(string)
+		**out = **in
+	}
 	return
 }
 
@@ -4477,6 +4622,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ExtendedResourceClaimStatus != nil {
+		in, out := &in.ExtendedResourceClaimStatus, &out.ExtendedResourceClaimStatus
+		*out = new(PodExtendedResourceClaimStatus)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -6412,6 +6562,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
 		*out = new(ClusterTrustBundleProjection)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.PodCertificate != nil {
+		in, out := &in.PodCertificate, &out.PodCertificate
+		*out = new(PodCertificateProjection)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
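The generated DeepCopy helpers above exist so that pointer and slice fields are duplicated rather than aliased. A minimal sketch of that guarantee using the vendored core/v1 types:

```go
// Illustrative sketch: mutating a deep copy does not write through to the
// original object's slice fields.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	orig := &v1.PodStatus{
		PodIPs: []v1.PodIP{{IP: "10.0.0.1"}},
	}

	cp := orig.DeepCopy() // allocates a fresh PodIPs slice
	cp.PodIPs[0].IP = "10.0.0.2"

	fmt.Println(orig.PodIPs[0].IP, cp.PodIPs[0].IP) // "10.0.0.1 10.0.0.2"
}
```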
 
diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
index 7770fab5d..be710973c 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/doc.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -18,5 +18,7 @@ limitations under the License.
 // +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true
+// +k8s:validation-gen=TypeMeta
+// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1
 
 package v1beta1
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 70fcec0cc..fed0b4835 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -980,7 +980,7 @@ message RollingUpdateDaemonSet {
   // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
   // on that node is marked deleted. If the old pod becomes unavailable for any
   // reason (Ready transitions to false, is evicted, or is drained) an updated
-  // pod is immediatedly created on that node without considering surge limits.
+  // pod is immediately created on that node without considering surge limits.
   // Allowing surge implies the possibility that the resources consumed by the
   // daemonset on any given node can double if the readiness check fails, and
   // so resource intensive daemonsets should take into account that they may
@@ -1039,6 +1039,9 @@ message Scale {
 message ScaleSpec {
   // desired number of instances for the scaled object.
   // +optional
+  // +k8s:optional
+  // +default=0
+  // +k8s:minimum=0
   optional int32 replicas = 1;
 }
 
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index b80a7a7e1..c7b50e059 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -27,6 +27,9 @@ import (
 type ScaleSpec struct {
 	// desired number of instances for the scaled object.
 	// +optional
+	// +k8s:optional
+	// +default=0
+	// +k8s:minimum=0
 	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
 }
 
@@ -54,6 +57,7 @@ type ScaleStatus struct {
 // +k8s:prerelease-lifecycle-gen:introduced=1.1
 // +k8s:prerelease-lifecycle-gen:deprecated=1.2
 // +k8s:prerelease-lifecycle-gen:removed=1.16
+// +k8s:isSubresource=/scale
 
 // represents a scaling request for a resource.
 type Scale struct {
@@ -398,7 +402,7 @@ type RollingUpdateDaemonSet struct {
 	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
 	// on that node is marked deleted. If the old pod becomes unavailable for any
 	// reason (Ready transitions to false, is evicted, or is drained) an updated
-	// pod is immediatedly created on that node without considering surge limits.
+	// pod is immediately created on that node without considering surge limits.
 	// Allowing surge implies the possibility that the resources consumed by the
 	// daemonset on any given node can double if the readiness check fails, and
 	// so resource intensive daemonsets should take into account that they may
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 923fab3aa..8a158233e 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -482,7 +482,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
 var map_RollingUpdateDaemonSet = map[string]string{
 	"":               "Spec to control the desired behavior of daemon set rolling update.",
 	"maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
-	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
+	"maxSurge":       "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
 }
 
 func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
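A minimal sketch of the maxSurge behaviour documented above, using the vendored extensions/v1beta1 types and intstr helpers; the 30% figure mirrors the example in the doc string.

```go
// Illustrative sketch: a surge-only DaemonSet rolling update. MaxSurge cannot
// be 0 when MaxUnavailable is 0, as the documentation above states.
package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxSurge := intstr.FromString("30%")
	zero := intstr.FromInt32(0)

	strategy := extensionsv1beta1.RollingUpdateDaemonSet{
		MaxUnavailable: &zero,     // no old pod is stopped before its replacement is Ready
		MaxSurge:       &maxSurge, // at most 30% of nodes may run a surged pod at once
	}
	fmt.Println(strategy.MaxSurge.String())
}
```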
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
new file mode 100644
index 000000000..6d2a1666a
--- /dev/null
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
@@ -0,0 +1,78 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by validation-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	context "context"
+	fmt "fmt"
+
+	operation "k8s.io/apimachinery/pkg/api/operation"
+	safe "k8s.io/apimachinery/pkg/api/safe"
+	validate "k8s.io/apimachinery/pkg/api/validate"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	field "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func init() { localSchemeBuilder.Register(RegisterValidations) }
+
+// RegisterValidations adds validation functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterValidations(scheme *runtime.Scheme) error {
+	scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
+		switch op.Request.SubresourcePath() {
+		case "/scale":
+			return Validate_Scale(ctx, op, nil /* fldPath */, obj.(*Scale), safe.Cast[*Scale](oldObj))
+		}
+		return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
+	})
+	return nil
+}
+
+func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) {
+	// field Scale.TypeMeta has no validation
+	// field Scale.ObjectMeta has no validation
+
+	// field Scale.Spec
+	errs = append(errs,
+		func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
+			errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
+			return
+		}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...)
+
+	// field Scale.Status has no validation
+	return errs
+}
+
+func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
+	// field ScaleSpec.Replicas
+	errs = append(errs,
+		func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
+			// optional value-type fields with zero-value defaults are purely documentation
+			if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
+				return nil // no changes
+			}
+			errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
+			return
+		}(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...)
+
+	return errs
+}
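
[editor's note] The generated Validate_ScaleSpec above only enforces the minimum when the replicas value actually changed on update, so stored objects that already violate a tightened rule keep passing validation ("ratcheting"). A self-contained sketch of that pattern, written in plain Go rather than the apimachinery validate/operation packages and using hypothetical names, could look like this.

package main

import "fmt"

// validateReplicas enforces replicas >= 0, but on update it skips the check
// when the value is unchanged from the stored object — the same ratcheting
// shape as the generated Validate_ScaleSpec. Illustrative sketch only.
func validateReplicas(isUpdate bool, replicas int32, oldReplicas *int32) []error {
	if isUpdate && oldReplicas != nil && replicas == *oldReplicas {
		return nil // unchanged field, so do not re-validate it
	}
	if replicas < 0 {
		return []error{fmt.Errorf("spec.replicas: must be greater than or equal to 0")}
	}
	return nil
}

func main() {
	old := int32(-1) // hypothetical stored object that already violates the rule
	fmt.Println(validateReplicas(true, -1, &old)) // no errors: value unchanged
	fmt.Println(validateReplicas(true, -2, &old)) // error: must be >= 0
}
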
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index e3e3e9215..16a2792aa 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -534,11 +534,12 @@ message NetworkPolicyPort {
 // NetworkPolicySpec provides the specification of a NetworkPolicy
 message NetworkPolicySpec {
   // podSelector selects the pods to which this NetworkPolicy object applies.
-  // The array of ingress rules is applied to any pods selected by this field.
+  // The array of rules is applied to any pods selected by this field. An empty
+  // selector matches all pods in the policy's namespace.
   // Multiple network policies can select the same set of pods. In this case,
   // the ingress rules for each are combined additively.
-  // This field is NOT optional and follows standard label selector semantics.
-  // An empty podSelector matches all pods in this namespace.
+  // This field is optional. If it is not specified, it defaults to an empty selector.
+  // +optional
   optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1;
 
   // ingress is a list of ingress rules to be applied to the selected pods.
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index 216647cee..7d9a4fc94 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -60,11 +60,12 @@ const (
 // NetworkPolicySpec provides the specification of a NetworkPolicy
 type NetworkPolicySpec struct {
 	// podSelector selects the pods to which this NetworkPolicy object applies.
-	// The array of ingress rules is applied to any pods selected by this field.
+	// The array of rules is applied to any pods selected by this field. An empty
+	// selector matches all pods in the policy's namespace.
 	// Multiple network policies can select the same set of pods. In this case,
 	// the ingress rules for each are combined additively.
-	// This field is NOT optional and follows standard label selector semantics.
-	// An empty podSelector matches all pods in this namespace.
+	// This field is optional. If it is not specified, it defaults to an empty selector.
+	// +optional
 	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
 
 	// ingress is a list of ingress rules to be applied to the selected pods.
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index 0e294848b..6210bb7a5 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -313,7 +313,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string {
 
 var map_NetworkPolicySpec = map[string]string{
 	"":            "NetworkPolicySpec provides the specification of a NetworkPolicy",
-	"podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.",
+	"podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of rules is applied to any pods selected by this field. An empty selector matches all pods in the policy's namespace. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is optional. If it is not specified, it defaults to an empty selector.",
 	"ingress":     "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)",
 	"egress":      "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8",
 	"policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8",
diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
deleted file mode 100644
index 0d4203483..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
+++ /dev/null
@@ -1,1929 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/api/networking/v1alpha1/generated.proto
-
-package v1alpha1
-
-import (
-	fmt "fmt"
-
-	io "io"
-
-	proto "github.com/gogo/protobuf/proto"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *IPAddress) Reset()      { *m = IPAddress{} }
-func (*IPAddress) ProtoMessage() {}
-func (*IPAddress) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{0}
-}
-func (m *IPAddress) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *IPAddress) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IPAddress.Merge(m, src)
-}
-func (m *IPAddress) XXX_Size() int {
-	return m.Size()
-}
-func (m *IPAddress) XXX_DiscardUnknown() {
-	xxx_messageInfo_IPAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddress proto.InternalMessageInfo
-
-func (m *IPAddressList) Reset()      { *m = IPAddressList{} }
-func (*IPAddressList) ProtoMessage() {}
-func (*IPAddressList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{1}
-}
-func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *IPAddressList) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IPAddressList.Merge(m, src)
-}
-func (m *IPAddressList) XXX_Size() int {
-	return m.Size()
-}
-func (m *IPAddressList) XXX_DiscardUnknown() {
-	xxx_messageInfo_IPAddressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
-
-func (m *IPAddressSpec) Reset()      { *m = IPAddressSpec{} }
-func (*IPAddressSpec) ProtoMessage() {}
-func (*IPAddressSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{2}
-}
-func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_IPAddressSpec.Merge(m, src)
-}
-func (m *IPAddressSpec) XXX_Size() int {
-	return m.Size()
-}
-func (m *IPAddressSpec) XXX_DiscardUnknown() {
-	xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
-
-func (m *ParentReference) Reset()      { *m = ParentReference{} }
-func (*ParentReference) ProtoMessage() {}
-func (*ParentReference) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{3}
-}
-func (m *ParentReference) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ParentReference) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ParentReference.Merge(m, src)
-}
-func (m *ParentReference) XXX_Size() int {
-	return m.Size()
-}
-func (m *ParentReference) XXX_DiscardUnknown() {
-	xxx_messageInfo_ParentReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ParentReference proto.InternalMessageInfo
-
-func (m *ServiceCIDR) Reset()      { *m = ServiceCIDR{} }
-func (*ServiceCIDR) ProtoMessage() {}
-func (*ServiceCIDR) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{4}
-}
-func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceCIDR.Merge(m, src)
-}
-func (m *ServiceCIDR) XXX_Size() int {
-	return m.Size()
-}
-func (m *ServiceCIDR) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
-
-func (m *ServiceCIDRList) Reset()      { *m = ServiceCIDRList{} }
-func (*ServiceCIDRList) ProtoMessage() {}
-func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{5}
-}
-func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceCIDRList.Merge(m, src)
-}
-func (m *ServiceCIDRList) XXX_Size() int {
-	return m.Size()
-}
-func (m *ServiceCIDRList) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
-
-func (m *ServiceCIDRSpec) Reset()      { *m = ServiceCIDRSpec{} }
-func (*ServiceCIDRSpec) ProtoMessage() {}
-func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{6}
-}
-func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
-}
-func (m *ServiceCIDRSpec) XXX_Size() int {
-	return m.Size()
-}
-func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
-
-func (m *ServiceCIDRStatus) Reset()      { *m = ServiceCIDRStatus{} }
-func (*ServiceCIDRStatus) ProtoMessage() {}
-func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_c1cb39e7b48ce50d, []int{7}
-}
-func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
-}
-func (m *ServiceCIDRStatus) XXX_Size() int {
-	return m.Size()
-}
-func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress")
-	proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList")
-	proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec")
-	proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference")
-	proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR")
-	proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList")
-	proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec")
-	proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus")
-}
-
-func init() {
-	proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d)
-}
-
-var fileDescriptor_c1cb39e7b48ce50d = []byte{
-	// 634 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a,
-	0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6,
-	0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05,
-	0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e,
-	0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46,
-	0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99,
-	0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e,
-	0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38,
-	0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a,
-	0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9,
-	0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43,
-	0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15,
-	0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42,
-	0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45,
-	0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06,
-	0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 0x34, 0x70, 0x4e,
-	0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58,
-	0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85,
-	0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3,
-	0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05,
-	0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09,
-	0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7,
-	0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 0x4f, 0x04,
-	0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0,
-	0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85,
-	0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5,
-	0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b,
-	0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa,
-	0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39,
-	0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a,
-	0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd,
-	0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56,
-	0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4,
-	0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d,
-	0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62,
-	0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5,
-	0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce,
-	0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad,
-	0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00,
-	0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00,
-}
-
-func (m *IPAddress) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	{
-		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x12
-	{
-		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *IPAddressList) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Items) > 0 {
-		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	{
-		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.ParentRef != nil {
-		{
-			size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintGenerated(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ParentReference) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	i -= len(m.Name)
-	copy(dAtA[i:], m.Name)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
-	i--
-	dAtA[i] = 0x22
-	i -= len(m.Namespace)
-	copy(dAtA[i:], m.Namespace)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
-	i--
-	dAtA[i] = 0x1a
-	i -= len(m.Resource)
-	copy(dAtA[i:], m.Resource)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
-	i--
-	dAtA[i] = 0x12
-	i -= len(m.Group)
-	copy(dAtA[i:], m.Group)
-	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	{
-		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x1a
-	{
-		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x12
-	{
-		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Items) > 0 {
-		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	{
-		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintGenerated(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.CIDRs) > 0 {
-		for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.CIDRs[iNdEx])
-			copy(dAtA[i:], m.CIDRs[iNdEx])
-			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Conditions) > 0 {
-		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintGenerated(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
-	offset -= sovGenerated(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *IPAddress) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ObjectMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.Spec.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
-}
-
-func (m *IPAddressList) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ListMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	if len(m.Items) > 0 {
-		for _, e := range m.Items {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *IPAddressSpec) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ParentRef != nil {
-		l = m.ParentRef.Size()
-		n += 1 + l + sovGenerated(uint64(l))
-	}
-	return n
-}
-
-func (m *ParentReference) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Group)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Resource)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Namespace)
-	n += 1 + l + sovGenerated(uint64(l))
-	l = len(m.Name)
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
-}
-
-func (m *ServiceCIDR) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ObjectMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.Spec.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	l = m.Status.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	return n
-}
-
-func (m *ServiceCIDRList) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ListMeta.Size()
-	n += 1 + l + sovGenerated(uint64(l))
-	if len(m.Items) > 0 {
-		for _, e := range m.Items {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *ServiceCIDRSpec) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.CIDRs) > 0 {
-		for _, s := range m.CIDRs {
-			l = len(s)
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
-	return n
-}
-
-func (m *ServiceCIDRStatus) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Conditions) > 0 {
-		for _, e := range m.Conditions {
-			l = e.Size()
-			n += 1 + l + sovGenerated(uint64(l))
-		}
-	}
-	return n
-}
-
-func sovGenerated(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
-	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *IPAddress) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&IPAddress{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *IPAddressList) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForItems := "[]IPAddress{"
-	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&IPAddressList{`,
-		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
-		`Items:` + repeatedStringForItems + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *IPAddressSpec) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&IPAddressSpec{`,
-		`ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ParentReference) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&ParentReference{`,
-		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
-		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
-		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
-		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ServiceCIDR) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&ServiceCIDR{`,
-		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
-		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
-		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ServiceCIDRList) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForItems := "[]ServiceCIDR{"
-	for _, f := range this.Items {
-		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
-	}
-	repeatedStringForItems += "}"
-	s := strings.Join([]string{`&ServiceCIDRList{`,
-		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
-		`Items:` + repeatedStringForItems + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ServiceCIDRSpec) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&ServiceCIDRSpec{`,
-		`CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ServiceCIDRStatus) String() string {
-	if this == nil {
-		return "nil"
-	}
-	repeatedStringForConditions := "[]Condition{"
-	for _, f := range this.Conditions {
-		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
-	}
-	repeatedStringForConditions += "}"
-	s := strings.Join([]string{`&ServiceCIDRStatus{`,
-		`Conditions:` + repeatedStringForConditions + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func valueToStringGenerated(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
-}
-func (m *IPAddress) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: IPAddress: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *IPAddressList) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Items = append(m.Items, IPAddress{})
-			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *IPAddressSpec) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.ParentRef == nil {
-				m.ParentRef = &ParentReference{}
-			}
-			if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ParentReference) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ParentReference: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Group = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Resource = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Namespace = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Items = append(m.Items, ServiceCIDR{})
-			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Conditions = append(m.Conditions, v1.Condition{})
-			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipGenerated(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowGenerated
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupGenerated
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthGenerated
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto
deleted file mode 100644
index 80ec6af73..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = "proto2";
-
-package k8s.io.api.networking.v1alpha1;
-
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "k8s.io/api/networking/v1alpha1";
-
-// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
-// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
-// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
-// the name of the object is the IP address in canonical format, four decimal digits separated
-// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
-// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
-// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
-message IPAddress {
-  // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-  // +optional
-  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  // spec is the desired state of the IPAddress.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-  // +optional
-  optional IPAddressSpec spec = 2;
-}
-
-// IPAddressList contains a list of IPAddress.
-message IPAddressList {
-  // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-  // +optional
-  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  // items is the list of IPAddresses.
-  repeated IPAddress items = 2;
-}
-
-// IPAddressSpec describe the attributes in an IP Address.
-message IPAddressSpec {
-  // ParentRef references the resource that an IPAddress is attached to.
-  // An IPAddress must reference a parent object.
-  // +required
-  optional ParentReference parentRef = 1;
-}
-
-// ParentReference describes a reference to a parent object.
-message ParentReference {
-  // Group is the group of the object being referenced.
-  // +optional
-  optional string group = 1;
-
-  // Resource is the resource of the object being referenced.
-  // +required
-  optional string resource = 2;
-
-  // Namespace is the namespace of the object being referenced.
-  // +optional
-  optional string namespace = 3;
-
-  // Name is the name of the object being referenced.
-  // +required
-  optional string name = 4;
-}
-
-// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
-// This range is used to allocate ClusterIPs to Service objects.
-message ServiceCIDR {
-  // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-  // +optional
-  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  // spec is the desired state of the ServiceCIDR.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-  // +optional
-  optional ServiceCIDRSpec spec = 2;
-
-  // status represents the current state of the ServiceCIDR.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-  // +optional
-  optional ServiceCIDRStatus status = 3;
-}
-
-// ServiceCIDRList contains a list of ServiceCIDR objects.
-message ServiceCIDRList {
-  // Standard object's metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-  // +optional
-  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  // items is the list of ServiceCIDRs.
-  repeated ServiceCIDR items = 2;
-}
-
-// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
-message ServiceCIDRSpec {
-  // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
-  // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
-  // The network address of each CIDR, the address that identifies the subnet of a host, is reserved
-  // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
-  // allocated.
-  // This field is immutable.
-  // +optional
-  // +listType=atomic
-  repeated string cidrs = 1;
-}
-
-// ServiceCIDRStatus describes the current state of the ServiceCIDR.
-message ServiceCIDRStatus {
-  // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
-  // Current service state
-  // +optional
-  // +patchMergeKey=type
-  // +patchStrategy=merge
-  // +listType=map
-  // +listMapKey=type
-  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
-}
-
diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go
deleted file mode 100644
index 0e454f026..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/types.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
-// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
-// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
-// the name of the object is the IP address in canonical format, four decimal digits separated
-// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
-// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
-// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
-type IPAddress struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-	// spec is the desired state of the IPAddress.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-	// +optional
-	Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-}
-
-// IPAddressSpec describe the attributes in an IP Address.
-type IPAddressSpec struct {
-	// ParentRef references the resource that an IPAddress is attached to.
-	// An IPAddress must reference a parent object.
-	// +required
-	ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"`
-}
-
-// ParentReference describes a reference to a parent object.
-type ParentReference struct {
-	// Group is the group of the object being referenced.
-	// +optional
-	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
-	// Resource is the resource of the object being referenced.
-	// +required
-	Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"`
-	// Namespace is the namespace of the object being referenced.
-	// +optional
-	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
-	// Name is the name of the object being referenced.
-	// +required
-	Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// IPAddressList contains a list of IPAddress.
-type IPAddressList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-	// items is the list of IPAddresses.
-	Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
-// This range is used to allocate ClusterIPs to Service objects.
-type ServiceCIDR struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-	// spec is the desired state of the ServiceCIDR.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-	// +optional
-	Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-	// status represents the current state of the ServiceCIDR.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-	// +optional
-	Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
-type ServiceCIDRSpec struct {
-	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
-	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
-	// The network address of each CIDR, the address that identifies the subnet of a host, is reserved
-	// and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be
-	// allocated.
-	// This field is immutable.
-	// +optional
-	// +listType=atomic
-	CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
-}
-
-const (
-	// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
-	// apiserver to allocate ClusterIPs for Services.
-	ServiceCIDRConditionReady = "Ready"
-	// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
-	// being deleted.
-	ServiceCIDRReasonTerminating = "Terminating"
-)
-
-// ServiceCIDRStatus describes the current state of the ServiceCIDR.
-type ServiceCIDRStatus struct {
-	// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
-	// Current service state
-	// +optional
-	// +patchMergeKey=type
-	// +patchStrategy=merge
-	// +listType=map
-	// +listMapKey=type
-	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.27
-
-// ServiceCIDRList contains a list of ServiceCIDR objects.
-type ServiceCIDRList struct {
-	metav1.TypeMeta `json:",inline"`
-	// Standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	// +optional
-	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-	// items is the list of ServiceCIDRs.
-	Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
deleted file mode 100644
index 4c8eb57a7..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-codegen.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_IPAddress = map[string]string{
-	"":         "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
-	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec":     "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
-}
-
-func (IPAddress) SwaggerDoc() map[string]string {
-	return map_IPAddress
-}
-
-var map_IPAddressList = map[string]string{
-	"":         "IPAddressList contains a list of IPAddress.",
-	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items":    "items is the list of IPAddresses.",
-}
-
-func (IPAddressList) SwaggerDoc() map[string]string {
-	return map_IPAddressList
-}
-
-var map_IPAddressSpec = map[string]string{
-	"":          "IPAddressSpec describe the attributes in an IP Address.",
-	"parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
-}
-
-func (IPAddressSpec) SwaggerDoc() map[string]string {
-	return map_IPAddressSpec
-}
-
-var map_ParentReference = map[string]string{
-	"":          "ParentReference describes a reference to a parent object.",
-	"group":     "Group is the group of the object being referenced.",
-	"resource":  "Resource is the resource of the object being referenced.",
-	"namespace": "Namespace is the namespace of the object being referenced.",
-	"name":      "Name is the name of the object being referenced.",
-}
-
-func (ParentReference) SwaggerDoc() map[string]string {
-	return map_ParentReference
-}
-
-var map_ServiceCIDR = map[string]string{
-	"":         "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
-	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"spec":     "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
-	"status":   "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
-}
-
-func (ServiceCIDR) SwaggerDoc() map[string]string {
-	return map_ServiceCIDR
-}
-
-var map_ServiceCIDRList = map[string]string{
-	"":         "ServiceCIDRList contains a list of ServiceCIDR objects.",
-	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
-	"items":    "items is the list of ServiceCIDRs.",
-}
-
-func (ServiceCIDRList) SwaggerDoc() map[string]string {
-	return map_ServiceCIDRList
-}
-
-var map_ServiceCIDRSpec = map[string]string{
-	"":      "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
-	"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.",
-}
-
-func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
-	return map_ServiceCIDRSpec
-}
-
-var map_ServiceCIDRStatus = map[string]string{
-	"":           "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
-	"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
-}
-
-func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
-	return map_ServiceCIDRStatus
-}
-
-// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
deleted file mode 100644
index 5f9c23f70..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-const (
-
-	// TODO: Use IPFamily as field with a field selector,And the value is set based on
-	// the name at create time and immutable.
-	// LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress.
-	// This label simplify dual-stack client operations allowing to obtain the list of
-	// IP addresses filtered by family.
-	LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family"
-	// LabelManagedBy is used to indicate the controller or entity that manages
-	// an IPAddress. This label aims to enable different IPAddress
-	// objects to be managed by different controllers or entities within the
-	// same cluster. It is highly recommended to configure this label for all
-	// IPAddress objects.
-	LabelManagedBy = "ipaddress.kubernetes.io/managed-by"
-)
diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index 5c8f697ba..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,229 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddress) DeepCopyInto(out *IPAddress) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
-func (in *IPAddress) DeepCopy() *IPAddress {
-	if in == nil {
-		return nil
-	}
-	out := new(IPAddress)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IPAddress) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]IPAddress, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
-func (in *IPAddressList) DeepCopy() *IPAddressList {
-	if in == nil {
-		return nil
-	}
-	out := new(IPAddressList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *IPAddressList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
-	*out = *in
-	if in.ParentRef != nil {
-		in, out := &in.ParentRef, &out.ParentRef
-		*out = new(ParentReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
-func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(IPAddressSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ParentReference) DeepCopyInto(out *ParentReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
-func (in *ParentReference) DeepCopy() *ParentReference {
-	if in == nil {
-		return nil
-	}
-	out := new(ParentReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
-func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceCIDR)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ServiceCIDR, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
-func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceCIDRList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
-	*out = *in
-	if in.CIDRs != nil {
-		in, out := &in.CIDRs, &out.CIDRs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
-func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceCIDRSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]v1.Condition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
-func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ServiceCIDRStatus)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
deleted file mode 100644
index 714e7b625..000000000
--- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ /dev/null
@@ -1,94 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
-
-package v1alpha1
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *IPAddress) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *IPAddress) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
-	return 1, 27
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or  "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
-	return 1, 30
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or  "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
-	return 1, 33
-}
diff --git a/vendor/k8s.io/api/resource/v1/devicetaint.go b/vendor/k8s.io/api/resource/v1/devicetaint.go
new file mode 100644
index 000000000..a5c2e20a6
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1/devicetaint.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
+var _ fmt.Stringer = DeviceTaint{}
+
+// String converts to a string in the format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t DeviceTaint) String() string {
+	if len(t.Effect) == 0 {
+		if len(t.Value) == 0 {
+			return fmt.Sprintf("%v", t.Key)
+		}
+		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+	}
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/resource/v1/doc.go
similarity index 83%
rename from vendor/k8s.io/api/networking/v1alpha1/doc.go
rename to vendor/k8s.io/api/resource/v1/doc.go
index 55264ae70..c94ca75dd 100644
--- a/vendor/k8s.io/api/networking/v1alpha1/doc.go
+++ b/vendor/k8s.io/api/resource/v1/doc.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2022 The Kubernetes Authors.
+Copyright 2025 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// +k8s:openapi-gen=true
 // +k8s:deepcopy-gen=package
 // +k8s:protobuf-gen=package
-// +k8s:openapi-gen=true
 // +k8s:prerelease-lifecycle-gen=true
-// +groupName=networking.k8s.io
+// +groupName=resource.k8s.io
 
-package v1alpha1
+// Package v1 is the v1 version of the resource API.
+package v1
diff --git a/vendor/k8s.io/api/resource/v1/generated.pb.go b/vendor/k8s.io/api/resource/v1/generated.pb.go
new file mode 100644
index 000000000..5695e2c7e
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1/generated.pb.go
@@ -0,0 +1,12777 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/resource/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v11 "k8s.io/api/core/v1"
+	resource "k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AllocatedDeviceStatus) Reset()      { *m = AllocatedDeviceStatus{} }
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{0}
+}
+func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
+}
+func (m *AllocatedDeviceStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
+
+func (m *AllocationResult) Reset()      { *m = AllocationResult{} }
+func (*AllocationResult) ProtoMessage() {}
+func (*AllocationResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{1}
+}
+func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AllocationResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AllocationResult.Merge(m, src)
+}
+func (m *AllocationResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *AllocationResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_AllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
+
+func (m *CELDeviceSelector) Reset()      { *m = CELDeviceSelector{} }
+func (*CELDeviceSelector) ProtoMessage() {}
+func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{2}
+}
+func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CELDeviceSelector.Merge(m, src)
+}
+func (m *CELDeviceSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *CELDeviceSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
+
+func (m *CapacityRequestPolicy) Reset()      { *m = CapacityRequestPolicy{} }
+func (*CapacityRequestPolicy) ProtoMessage() {}
+func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{3}
+}
+func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CapacityRequestPolicy.Merge(m, src)
+}
+func (m *CapacityRequestPolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *CapacityRequestPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo
+
+func (m *CapacityRequestPolicyRange) Reset()      { *m = CapacityRequestPolicyRange{} }
+func (*CapacityRequestPolicyRange) ProtoMessage() {}
+func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{4}
+}
+func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src)
+}
+func (m *CapacityRequestPolicyRange) XXX_Size() int {
+	return m.Size()
+}
+func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo
+
+func (m *CapacityRequirements) Reset()      { *m = CapacityRequirements{} }
+func (*CapacityRequirements) ProtoMessage() {}
+func (*CapacityRequirements) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{5}
+}
+func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CapacityRequirements) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CapacityRequirements.Merge(m, src)
+}
+func (m *CapacityRequirements) XXX_Size() int {
+	return m.Size()
+}
+func (m *CapacityRequirements) XXX_DiscardUnknown() {
+	xxx_messageInfo_CapacityRequirements.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo
+
+func (m *Counter) Reset()      { *m = Counter{} }
+func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{6}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Counter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Counter.Merge(m, src)
+}
+func (m *Counter) XXX_Size() int {
+	return m.Size()
+}
+func (m *Counter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
+
+func (m *CounterSet) Reset()      { *m = CounterSet{} }
+func (*CounterSet) ProtoMessage() {}
+func (*CounterSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{7}
+}
+func (m *CounterSet) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CounterSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CounterSet.Merge(m, src)
+}
+func (m *CounterSet) XXX_Size() int {
+	return m.Size()
+}
+func (m *CounterSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_CounterSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CounterSet proto.InternalMessageInfo
+
+func (m *Device) Reset()      { *m = Device{} }
+func (*Device) ProtoMessage() {}
+func (*Device) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{8}
+}
+func (m *Device) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Device) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Device.Merge(m, src)
+}
+func (m *Device) XXX_Size() int {
+	return m.Size()
+}
+func (m *Device) XXX_DiscardUnknown() {
+	xxx_messageInfo_Device.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device proto.InternalMessageInfo
+
+func (m *DeviceAllocationConfiguration) Reset()      { *m = DeviceAllocationConfiguration{} }
+func (*DeviceAllocationConfiguration) ProtoMessage() {}
+func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{9}
+}
+func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
+}
+func (m *DeviceAllocationConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
+
+func (m *DeviceAllocationResult) Reset()      { *m = DeviceAllocationResult{} }
+func (*DeviceAllocationResult) ProtoMessage() {}
+func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{10}
+}
+func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
+}
+func (m *DeviceAllocationResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
+
+func (m *DeviceAttribute) Reset()      { *m = DeviceAttribute{} }
+func (*DeviceAttribute) ProtoMessage() {}
+func (*DeviceAttribute) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{11}
+}
+func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceAttribute.Merge(m, src)
+}
+func (m *DeviceAttribute) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceAttribute) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
+
+func (m *DeviceCapacity) Reset()      { *m = DeviceCapacity{} }
+func (*DeviceCapacity) ProtoMessage() {}
+func (*DeviceCapacity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{12}
+}
+func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceCapacity.Merge(m, src)
+}
+func (m *DeviceCapacity) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceCapacity) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
+
+func (m *DeviceClaim) Reset()      { *m = DeviceClaim{} }
+func (*DeviceClaim) ProtoMessage() {}
+func (*DeviceClaim) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{13}
+}
+func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClaim) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClaim.Merge(m, src)
+}
+func (m *DeviceClaim) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClaim) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
+
+func (m *DeviceClaimConfiguration) Reset()      { *m = DeviceClaimConfiguration{} }
+func (*DeviceClaimConfiguration) ProtoMessage() {}
+func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{14}
+}
+func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
+}
+func (m *DeviceClaimConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
+
+func (m *DeviceClass) Reset()      { *m = DeviceClass{} }
+func (*DeviceClass) ProtoMessage() {}
+func (*DeviceClass) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{15}
+}
+func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClass) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClass.Merge(m, src)
+}
+func (m *DeviceClass) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClass) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClass.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
+
+func (m *DeviceClassConfiguration) Reset()      { *m = DeviceClassConfiguration{} }
+func (*DeviceClassConfiguration) ProtoMessage() {}
+func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{16}
+}
+func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
+}
+func (m *DeviceClassConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
+
+func (m *DeviceClassList) Reset()      { *m = DeviceClassList{} }
+func (*DeviceClassList) ProtoMessage() {}
+func (*DeviceClassList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{17}
+}
+func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClassList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClassList.Merge(m, src)
+}
+func (m *DeviceClassList) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClassList) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
+
+func (m *DeviceClassSpec) Reset()      { *m = DeviceClassSpec{} }
+func (*DeviceClassSpec) ProtoMessage() {}
+func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{18}
+}
+func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceClassSpec.Merge(m, src)
+}
+func (m *DeviceClassSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceClassSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
+
+func (m *DeviceConfiguration) Reset()      { *m = DeviceConfiguration{} }
+func (*DeviceConfiguration) ProtoMessage() {}
+func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{19}
+}
+func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceConfiguration.Merge(m, src)
+}
+func (m *DeviceConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
+
+func (m *DeviceConstraint) Reset()      { *m = DeviceConstraint{} }
+func (*DeviceConstraint) ProtoMessage() {}
+func (*DeviceConstraint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{20}
+}
+func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceConstraint.Merge(m, src)
+}
+func (m *DeviceConstraint) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceConstraint) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+
+func (m *DeviceCounterConsumption) Reset()      { *m = DeviceCounterConsumption{} }
+func (*DeviceCounterConsumption) ProtoMessage() {}
+func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{21}
+}
+func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
+}
+func (m *DeviceCounterConsumption) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
+
+func (m *DeviceRequest) Reset()      { *m = DeviceRequest{} }
+func (*DeviceRequest) ProtoMessage() {}
+func (*DeviceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{22}
+}
+func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceRequest.Merge(m, src)
+}
+func (m *DeviceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
+
+func (m *DeviceRequestAllocationResult) Reset()      { *m = DeviceRequestAllocationResult{} }
+func (*DeviceRequestAllocationResult) ProtoMessage() {}
+func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{23}
+}
+func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
+}
+func (m *DeviceRequestAllocationResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
+
+func (m *DeviceSelector) Reset()      { *m = DeviceSelector{} }
+func (*DeviceSelector) ProtoMessage() {}
+func (*DeviceSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{24}
+}
+func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceSelector.Merge(m, src)
+}
+func (m *DeviceSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+
+func (m *DeviceSubRequest) Reset()      { *m = DeviceSubRequest{} }
+func (*DeviceSubRequest) ProtoMessage() {}
+func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{25}
+}
+func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceSubRequest.Merge(m, src)
+}
+func (m *DeviceSubRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceSubRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
+
+func (m *DeviceTaint) Reset()      { *m = DeviceTaint{} }
+func (*DeviceTaint) ProtoMessage() {}
+func (*DeviceTaint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{26}
+}
+func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceTaint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceTaint.Merge(m, src)
+}
+func (m *DeviceTaint) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceTaint) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
+
+func (m *DeviceToleration) Reset()      { *m = DeviceToleration{} }
+func (*DeviceToleration) ProtoMessage() {}
+func (*DeviceToleration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{27}
+}
+func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeviceToleration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceToleration.Merge(m, src)
+}
+func (m *DeviceToleration) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeviceToleration) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
+
+func (m *ExactDeviceRequest) Reset()      { *m = ExactDeviceRequest{} }
+func (*ExactDeviceRequest) ProtoMessage() {}
+func (*ExactDeviceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{28}
+}
+func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExactDeviceRequest.Merge(m, src)
+}
+func (m *ExactDeviceRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExactDeviceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo
+
+func (m *NetworkDeviceData) Reset()      { *m = NetworkDeviceData{} }
+func (*NetworkDeviceData) ProtoMessage() {}
+func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{29}
+}
+func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NetworkDeviceData.Merge(m, src)
+}
+func (m *NetworkDeviceData) XXX_Size() int {
+	return m.Size()
+}
+func (m *NetworkDeviceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
+
+func (m *OpaqueDeviceConfiguration) Reset()      { *m = OpaqueDeviceConfiguration{} }
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{30}
+}
+func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
+}
+func (m *OpaqueDeviceConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
+
+func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
+func (*ResourceClaim) ProtoMessage() {}
+func (*ResourceClaim) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{31}
+}
+func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaim) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaim.Merge(m, src)
+}
+func (m *ResourceClaim) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaim) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+
+func (m *ResourceClaimConsumerReference) Reset()      { *m = ResourceClaimConsumerReference{} }
+func (*ResourceClaimConsumerReference) ProtoMessage() {}
+func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{32}
+}
+func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
+}
+func (m *ResourceClaimConsumerReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
+
+func (m *ResourceClaimList) Reset()      { *m = ResourceClaimList{} }
+func (*ResourceClaimList) ProtoMessage() {}
+func (*ResourceClaimList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{33}
+}
+func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimList.Merge(m, src)
+}
+func (m *ResourceClaimList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
+
+func (m *ResourceClaimSpec) Reset()      { *m = ResourceClaimSpec{} }
+func (*ResourceClaimSpec) ProtoMessage() {}
+func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{34}
+}
+func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
+}
+func (m *ResourceClaimSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
+
+func (m *ResourceClaimStatus) Reset()      { *m = ResourceClaimStatus{} }
+func (*ResourceClaimStatus) ProtoMessage() {}
+func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{35}
+}
+func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
+}
+func (m *ResourceClaimStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplate) Reset()      { *m = ResourceClaimTemplate{} }
+func (*ResourceClaimTemplate) ProtoMessage() {}
+func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{36}
+}
+func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
+}
+func (m *ResourceClaimTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplateList) Reset()      { *m = ResourceClaimTemplateList{} }
+func (*ResourceClaimTemplateList) ProtoMessage() {}
+func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{37}
+}
+func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
+}
+func (m *ResourceClaimTemplateList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
+
+func (m *ResourceClaimTemplateSpec) Reset()      { *m = ResourceClaimTemplateSpec{} }
+func (*ResourceClaimTemplateSpec) ProtoMessage() {}
+func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{38}
+}
+func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
+}
+func (m *ResourceClaimTemplateSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
+
+func (m *ResourcePool) Reset()      { *m = ResourcePool{} }
+func (*ResourcePool) ProtoMessage() {}
+func (*ResourcePool) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{39}
+}
+func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourcePool) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourcePool.Merge(m, src)
+}
+func (m *ResourcePool) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourcePool) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourcePool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
+
+func (m *ResourceSlice) Reset()      { *m = ResourceSlice{} }
+func (*ResourceSlice) ProtoMessage() {}
+func (*ResourceSlice) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{40}
+}
+func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceSlice) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceSlice.Merge(m, src)
+}
+func (m *ResourceSlice) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceSlice) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
+
+func (m *ResourceSliceList) Reset()      { *m = ResourceSliceList{} }
+func (*ResourceSliceList) ProtoMessage() {}
+func (*ResourceSliceList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{41}
+}
+func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceSliceList.Merge(m, src)
+}
+func (m *ResourceSliceList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceSliceList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
+
+func (m *ResourceSliceSpec) Reset()      { *m = ResourceSliceSpec{} }
+func (*ResourceSliceSpec) ProtoMessage() {}
+func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_f4fc532aec02d243, []int{42}
+}
+func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
+}
+func (m *ResourceSliceSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1.AllocatedDeviceStatus")
+	proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1.AllocationResult")
+	proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1.CELDeviceSelector")
+	proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicy")
+	proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicyRange")
+	proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1.CapacityRequirements")
+	proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.CapacityRequirements.RequestsEntry")
+	proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1.Counter")
+	proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1.CounterSet")
+	proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.CounterSet.CountersEntry")
+	proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1.Device")
+	proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1.Device.AttributesEntry")
+	proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1.Device.CapacityEntry")
+	proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1.DeviceAllocationConfiguration")
+	proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceAllocationResult")
+	proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1.DeviceAttribute")
+	proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1.DeviceCapacity")
+	proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1.DeviceClaim")
+	proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClaimConfiguration")
+	proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1.DeviceClass")
+	proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClassConfiguration")
+	proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1.DeviceClassList")
+	proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1.DeviceClassSpec")
+	proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1.DeviceConfiguration")
+	proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1.DeviceConstraint")
+	proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption")
+	proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption.CountersEntry")
+	proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1.DeviceRequest")
+	proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult")
+	proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult.ConsumedCapacityEntry")
+	proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1.DeviceSelector")
+	proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1.DeviceSubRequest")
+	proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1.DeviceTaint")
+	proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1.DeviceToleration")
+	proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1.ExactDeviceRequest")
+	proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1.NetworkDeviceData")
+	proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1.OpaqueDeviceConfiguration")
+	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1.ResourceClaim")
+	proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1.ResourceClaimConsumerReference")
+	proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1.ResourceClaimList")
+	proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimSpec")
+	proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1.ResourceClaimStatus")
+	proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplate")
+	proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateList")
+	proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateSpec")
+	proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1.ResourcePool")
+	proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1.ResourceSlice")
+	proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1.ResourceSliceList")
+	proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1.ResourceSliceSpec")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/api/resource/v1/generated.proto", fileDescriptor_f4fc532aec02d243)
+}
+
+var fileDescriptor_f4fc532aec02d243 = []byte{
+	// 3028 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x24, 0x47,
+	0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xbb, 0xeb, 0x4c, 0xfc, 0xff, 0xc7,
+	0xe3, 0xf4, 0x92, 0xc4, 0x49, 0x76, 0xc7, 0x6b, 0x8b, 0x44, 0x51, 0x12, 0x10, 0x1e, 0xdb, 0x9b,
+	0x38, 0xfb, 0x11, 0xa7, 0xc6, 0x6b, 0x36, 0x28, 0x84, 0xb4, 0x7b, 0xca, 0x76, 0xe3, 0x9e, 0xee,
+	0x49, 0x77, 0x8d, 0x77, 0xcd, 0x29, 0xe2, 0x00, 0x57, 0x04, 0x12, 0x02, 0x24, 0x24, 0x94, 0x03,
+	0x12, 0x17, 0x84, 0x38, 0x11, 0x04, 0x28, 0xc7, 0x08, 0x29, 0x28, 0x17, 0xa4, 0x20, 0xa1, 0x81,
+	0x1d, 0x4e, 0x48, 0x08, 0x89, 0x0b, 0x07, 0x1f, 0x10, 0xaa, 0xea, 0xaa, 0xfe, 0x9a, 0x6e, 0x4f,
+	0xdb, 0x59, 0xaf, 0x96, 0x9b, 0xe7, 0xd5, 0x7b, 0xbf, 0xaa, 0x7a, 0xf5, 0xbe, 0xea, 0x75, 0x19,
+	0x9e, 0xdc, 0x7b, 0xc1, 0xad, 0x19, 0xf6, 0xbc, 0xd6, 0x36, 0xe6, 0x1d, 0xe2, 0xda, 0x1d, 0x47,
+	0x27, 0xf3, 0xfb, 0x0b, 0xf3, 0x3b, 0xc4, 0x22, 0x8e, 0x46, 0x49, 0xb3, 0xd6, 0x76, 0x6c, 0x6a,
+	0xa3, 0x29, 0x8f, 0xaf, 0xa6, 0xb5, 0x8d, 0x9a, 0xe4, 0xab, 0xed, 0x2f, 0x4c, 0x5f, 0xde, 0x31,
+	0xe8, 0x6e, 0x67, 0xab, 0xa6, 0xdb, 0xad, 0xf9, 0x1d, 0x7b, 0xc7, 0x9e, 0xe7, 0xec, 0x5b, 0x9d,
+	0x6d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x79, 0x30, 0xd3, 0x6a, 0x68, 0x3a, 0xdd, 0x76, 0x92, 0xa6,
+	0x9a, 0xfe, 0x7c, 0xc0, 0xd3, 0xd2, 0xf4, 0x5d, 0xc3, 0x22, 0xce, 0xc1, 0x7c, 0x7b, 0x6f, 0x27,
+	0xba, 0xc6, 0xe3, 0x48, 0xb9, 0xf3, 0x2d, 0x42, 0xb5, 0xa4, 0xb9, 0xe6, 0xd3, 0xa4, 0x9c, 0x8e,
+	0x45, 0x8d, 0x56, 0xff, 0x34, 0xcf, 0x0f, 0x12, 0x70, 0xf5, 0x5d, 0xd2, 0xd2, 0xe2, 0x72, 0xea,
+	0x87, 0x79, 0xb8, 0xb0, 0x64, 0x9a, 0xb6, 0xce, 0x68, 0x2b, 0x64, 0xdf, 0xd0, 0x49, 0x83, 0x6a,
+	0xb4, 0xe3, 0xa2, 0x27, 0xa1, 0xd8, 0x74, 0x8c, 0x7d, 0xe2, 0x54, 0x94, 0x59, 0x65, 0x6e, 0xa4,
+	0x3e, 0xfe, 0x51, 0xb7, 0x3a, 0xd4, 0xeb, 0x56, 0x8b, 0x2b, 0x9c, 0x8a, 0xc5, 0x28, 0x9a, 0x85,
+	0x42, 0xdb, 0xb6, 0xcd, 0x4a, 0x8e, 0x73, 0x8d, 0x0a, 0xae, 0xc2, 0xba, 0x6d, 0x9b, 0x98, 0x8f,
+	0x70, 0x24, 0x8e, 0x5c, 0xc9, 0xc7, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4, 0x04, 0x0c, 0xbb, 0xbb,
+	0x9a, 0x43, 0xd6, 0x56, 0x2a, 0xc3, 0x9c, 0xb1, 0xdc, 0xeb, 0x56, 0x87, 0x1b, 0x1e, 0x09, 0xcb,
+	0x31, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x34, 0xa8, 0x61, 0x5b, 0x6e, 0xa5, 0x30, 0x9b, 0x9f, 0x2b,
+	0x2f, 0xce, 0xd7, 0x02, 0x3b, 0xf0, 0xf7, 0x5f, 0x6b, 0xef, 0xed, 0x30, 0x82, 0x5b, 0x63, 0x6a,
+	0xae, 0xed, 0x2f, 0xd4, 0x96, 0xa5, 0x5c, 0x1d, 0x89, 0x35, 0x80, 0x4f, 0x72, 0x71, 0x08, 0x16,
+	0x5d, 0x83, 0x42, 0x53, 0xa3, 0x5a, 0xe5, 0xcc, 0xac, 0x32, 0x57, 0x5e, 0xbc, 0x9c, 0x0a, 0x2f,
+	0xd4, 0x5b, 0xc3, 0xda, 0x9d, 0xd5, 0xbb, 0x94, 0x58, 0x2e, 0x03, 0x2f, 0x31, 0x05, 0xac, 0x68,
+	0x54, 0xc3, 0x1c, 0x04, 0xbd, 0x05, 0x65, 0x8b, 0xd0, 0x3b, 0xb6, 0xb3, 0xc7, 0x88, 0x95, 0x22,
+	0xc7, 0x7c, 0xba, 0x96, 0x6c, 0xba, 0xb5, 0x9b, 0x82, 0x95, 0x2b, 0x85, 0x09, 0xd4, 0xcf, 0xf6,
+	0xba, 0xd5, 0xf2, 0xcd, 0x00, 0x01, 0x87, 0xe1, 0xd4, 0xdf, 0xe4, 0x60, 0x42, 0x1c, 0xa1, 0x61,
+	0x5b, 0x98, 0xb8, 0x1d, 0x93, 0xa2, 0x37, 0x61, 0xd8, 0xd3, 0xaa, 0xcb, 0x8f, 0xaf, 0xbc, 0x58,
+	0x4b, 0x9b, 0xce, 0x9b, 0x27, 0x0e, 0x50, 0x3f, 0x2b, 0x14, 0x34, 0xec, 0x8d, 0xbb, 0x58, 0xe2,
+	0xa1, 0x4d, 0x18, 0xb5, 0xec, 0x26, 0x69, 0x10, 0x93, 0xe8, 0xd4, 0x76, 0xf8, 0xa1, 0x96, 0x17,
+	0x67, 0xc3, 0xf8, 0xcc, 0x85, 0xf8, 0x56, 0x42, 0x7c, 0xf5, 0x89, 0x5e, 0xb7, 0x3a, 0x1a, 0xa6,
+	0xe0, 0x08, 0x0e, 0xea, 0xc0, 0x39, 0xcd, 0x5f, 0xc5, 0x86, 0xd1, 0x22, 0x2e, 0xd5, 0x5a, 0x6d,
+	0x71, 0x02, 0xcf, 0x64, 0x3b, 0x60, 0x26, 0x56, 0x7f, 0xa4, 0xd7, 0xad, 0x9e, 0x5b, 0xea, 0x87,
+	0xc2, 0x49, 0xf8, 0xea, 0x2b, 0x30, 0xb9, 0xbc, 0x7a, 0x5d, 0x98, 0xbe, 0x5c, 0xcb, 0x22, 0x00,
+	0xb9, 0xdb, 0x76, 0x88, 0xcb, 0xce, 0x53, 0x38, 0x80, 0x6f, 0x32, 0xab, 0xfe, 0x08, 0x0e, 0x71,
+	0xa9, 0x1f, 0xe4, 0xe0, 0xc2, 0xb2, 0xd6, 0xd6, 0x74, 0x83, 0x1e, 0x60, 0xf2, 0x6e, 0x87, 0xb8,
+	0x74, 0xdd, 0x36, 0x0d, 0xfd, 0x00, 0xdd, 0x62, 0x87, 0xb1, 0xad, 0x75, 0x4c, 0x9a, 0x70, 0x18,
+	0x7d, 0xbb, 0x09, 0x4e, 0xe7, 0x8d, 0x8e, 0x66, 0x51, 0x83, 0x1e, 0x78, 0x8e, 0xb0, 0xe2, 0x41,
+	0x60, 0x89, 0x85, 0x08, 0x94, 0xf7, 0x35, 0xd3, 0x68, 0x6e, 0x6a, 0x66, 0x87, 0xb8, 0x95, 0x3c,
+	0xf7, 0x84, 0xe3, 0x42, 0x9f, 0x13, 0xbb, 0x2a, 0x6f, 0x06, 0x50, 0x38, 0x8c, 0x8b, 0xb6, 0x00,
+	0xf8, 0x4f, 0xac, 0x59, 0x3b, 0xa4, 0x52, 0xe0, 0x1b, 0x58, 0x4c, 0xb3, 0xa6, 0x44, 0x05, 0x70,
+	0xc9, 0xfa, 0x38, 0xd3, 0xdd, 0xa6, 0x8f, 0x84, 0x43, 0xa8, 0xea, 0x7b, 0x39, 0x98, 0x4e, 0x17,
+	0x45, 0x6b, 0x90, 0x6f, 0x19, 0xd6, 0x09, 0x95, 0x37, 0xdc, 0xeb, 0x56, 0xf3, 0x37, 0x0c, 0x0b,
+	0x33, 0x0c, 0x0e, 0xa5, 0xdd, 0xe5, 0xd1, 0xea, 0xa4, 0x50, 0xda, 0x5d, 0xcc, 0x30, 0xd0, 0x75,
+	0x28, 0xb8, 0x94, 0xb4, 0x85, 0x03, 0x1c, 0x17, 0x8b, 0x07, 0x89, 0x06, 0x25, 0x6d, 0xcc, 0x51,
+	0xd4, 0xff, 0x28, 0x70, 0x3e, 0xac, 0x02, 0xc3, 0x21, 0x2d, 0x62, 0x51, 0x17, 0x1d, 0x40, 0xc9,
+	0xf1, 0x54, 0xc2, 0x7c, 0x99, 0x9d, 0xf1, 0x8b, 0x59, 0xb4, 0x2f, 0xe5, 0x6b, 0x42, 0x9f, 0xee,
+	0xaa, 0x45, 0x9d, 0x83, 0xfa, 0xe3, 0xe2, 0xbc, 0x4b, 0x92, 0xfc, 0xcd, 0xbf, 0x54, 0xc7, 0xde,
+	0xe8, 0x68, 0xa6, 0xb1, 0x6d, 0x90, 0xe6, 0x4d, 0xad, 0x45, 0xb0, 0x3f, 0xdd, 0xf4, 0x1e, 0x8c,
+	0x45, 0xa4, 0xd1, 0x04, 0xe4, 0xf7, 0xc8, 0x81, 0xe7, 0x10, 0x98, 0xfd, 0x89, 0x56, 0xe0, 0xcc,
+	0x3e, 0xb3, 0x93, 0x93, 0x69, 0x14, 0x7b, 0xc2, 0x2f, 0xe6, 0x5e, 0x50, 0xd4, 0xb7, 0x61, 0x78,
+	0xd9, 0xee, 0x58, 0x94, 0x38, 0xa8, 0x21, 0x41, 0x4f, 0x76, 0xe2, 0x63, 0x62, 0x8f, 0x67, 0xb8,
+	0x05, 0x8b, 0x39, 0xd4, 0x7f, 0x28, 0x00, 0x62, 0x82, 0x06, 0xa1, 0x2c, 0x6f, 0x59, 0x5a, 0x8b,
+	0x08, 0xe7, 0xf6, 0xf3, 0x16, 0xd7, 0x00, 0x1f, 0x41, 0x6f, 0x43, 0x49, 0xf7, 0xf8, 0xdd, 0x4a,
+	0x8e, 0x2b, 0xfe, 0x4a, 0xaa, 0xe2, 0x7d, 0x5c, 0xf9, 0xa7, 0x50, 0xf7, 0x84, 0x54, 0xb7, 0x24,
+	0x63, 0x1f, 0x73, 0xfa, 0x2d, 0x18, 0x8b, 0x30, 0x27, 0x68, 0xf7, 0xb9, 0xa8, 0x76, 0xab, 0x03,
+	0xe6, 0x0f, 0xab, 0xf3, 0xdf, 0x25, 0x10, 0x09, 0x36, 0xc3, 0x56, 0x5d, 0x00, 0x8d, 0x52, 0xc7,
+	0xd8, 0xea, 0x50, 0x22, 0x37, 0x3b, 0x20, 0x63, 0xd4, 0x96, 0x7c, 0x01, 0x6f, 0xab, 0x17, 0x65,
+	0x7c, 0x0c, 0x06, 0xfa, 0x6d, 0x2b, 0x34, 0x0d, 0xda, 0x83, 0x92, 0x2e, 0x0c, 0x56, 0x04, 0xaf,
+	0x4b, 0x03, 0xa6, 0x94, 0xf6, 0x1d, 0x33, 0x65, 0x49, 0x4e, 0x30, 0x65, 0x39, 0x01, 0xda, 0x87,
+	0x09, 0xdd, 0xb6, 0xdc, 0x4e, 0x8b, 0xb8, 0x52, 0xe9, 0xa2, 0x76, 0xb8, 0x72, 0xf4, 0xa4, 0x82,
+	0x7b, 0x99, 0x0b, 0xb7, 0x79, 0xf1, 0x50, 0x11, 0x13, 0x4f, 0x2c, 0xc7, 0x10, 0x71, 0xdf, 0x1c,
+	0x68, 0x0e, 0x4a, 0x2c, 0xcb, 0xb1, 0xd5, 0xf0, 0x54, 0x36, 0x52, 0x1f, 0x65, 0x4b, 0xbe, 0x29,
+	0x68, 0xd8, 0x1f, 0xed, 0xcb, 0xab, 0xc5, 0xfb, 0x94, 0x57, 0xe7, 0xa0, 0xa4, 0x99, 0x26, 0x63,
+	0x70, 0x79, 0x5d, 0x55, 0xf2, 0x56, 0xb0, 0x24, 0x68, 0xd8, 0x1f, 0x45, 0xd7, 0xa0, 0x48, 0x35,
+	0xc3, 0xa2, 0x6e, 0xa5, 0xc4, 0x35, 0x73, 0xf1, 0x68, 0xcd, 0x6c, 0x30, 0xde, 0xa0, 0x9a, 0xe3,
+	0x3f, 0x5d, 0x2c, 0x20, 0xd0, 0x02, 0x94, 0xb7, 0x0c, 0xab, 0xe9, 0x6e, 0xd8, 0x0c, 0xbc, 0x32,
+	0xc2, 0x67, 0xe6, 0x95, 0x4c, 0x3d, 0x20, 0xe3, 0x30, 0x0f, 0x5a, 0x86, 0x49, 0xf6, 0xd3, 0xb0,
+	0x76, 0x82, 0xaa, 0xac, 0x02, 0xb3, 0xf9, 0xb9, 0x91, 0xfa, 0x85, 0x5e, 0xb7, 0x3a, 0x59, 0x8f,
+	0x0f, 0xe2, 0x7e, 0x7e, 0x74, 0x1b, 0x2a, 0x82, 0x78, 0x55, 0x33, 0xcc, 0x8e, 0x43, 0x42, 0x58,
+	0x65, 0x8e, 0xf5, 0xff, 0xbd, 0x6e, 0xb5, 0x52, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x0c, 0x99, 0x15,
+	0x10, 0x77, 0x6e, 0x74, 0x4c, 0x6a, 0xb4, 0xcd, 0x50, 0xcd, 0xe4, 0x56, 0x46, 0xf9, 0xf6, 0x38,
+	0xf2, 0x52, 0x0a, 0x0f, 0x4e, 0x95, 0x9e, 0xde, 0x86, 0xb3, 0x31, 0x6f, 0x4a, 0x88, 0x05, 0x5f,
+	0x88, 0xc6, 0x82, 0xa7, 0x06, 0x14, 0x74, 0x12, 0x2f, 0x14, 0x13, 0xa6, 0x75, 0x18, 0x8b, 0xb8,
+	0x50, 0xc2, 0x2c, 0x2f, 0x47, 0x67, 0x79, 0x72, 0x80, 0x73, 0xc8, 0x84, 0x13, 0x0a, 0x3c, 0xdf,
+	0xce, 0xc1, 0x63, 0xf1, 0xa2, 0x72, 0xd9, 0xb6, 0xb6, 0x8d, 0x9d, 0x8e, 0xc3, 0x7f, 0xa0, 0x2f,
+	0x41, 0xd1, 0x03, 0x12, 0x11, 0x69, 0x4e, 0x9a, 0x50, 0x83, 0x53, 0x0f, 0xbb, 0xd5, 0xa9, 0xb8,
+	0xa8, 0x37, 0x82, 0x85, 0x1c, 0xb3, 0x69, 0x3f, 0x27, 0xe6, 0xf8, 0xa1, 0x8e, 0x86, 0x73, 0x5a,
+	0x90, 0xc2, 0xd0, 0x37, 0xe0, 0x5c, 0x53, 0xf8, 0x71, 0x68, 0x09, 0x22, 0x67, 0x3f, 0x3b, 0xc8,
+	0xf5, 0x43, 0x22, 0xf5, 0xff, 0x13, 0xab, 0x3c, 0x97, 0x30, 0x88, 0x93, 0x26, 0x51, 0xff, 0xa4,
+	0xc0, 0x54, 0x72, 0x79, 0x8d, 0xde, 0x81, 0x61, 0x87, 0xff, 0x25, 0x73, 0xfa, 0x73, 0x47, 0x2f,
+	0x45, 0xec, 0x2c, 0xbd, 0x4c, 0xf7, 0x7e, 0xbb, 0x58, 0xc2, 0xa2, 0xaf, 0x42, 0x51, 0xe7, 0xab,
+	0x11, 0xe1, 0xfc, 0xb9, 0xac, 0x17, 0x80, 0xe8, 0xae, 0x7d, 0xf7, 0xf6, 0xc8, 0x58, 0x80, 0xaa,
+	0x3f, 0x53, 0xe0, 0x6c, 0xcc, 0xd2, 0xd0, 0x0c, 0xe4, 0x0d, 0x8b, 0x72, 0xcb, 0xc9, 0x7b, 0x07,
+	0xb2, 0x66, 0x51, 0x2f, 0x07, 0xb3, 0x01, 0xf4, 0x38, 0x14, 0xb6, 0xd8, 0x55, 0x31, 0xcf, 0x9d,
+	0x65, 0xac, 0xd7, 0xad, 0x8e, 0xd4, 0x6d, 0xdb, 0xf4, 0x38, 0xf8, 0x10, 0x7a, 0x0a, 0x8a, 0x2e,
+	0x75, 0x0c, 0x6b, 0x87, 0x17, 0x9a, 0x23, 0x5e, 0xc0, 0x68, 0x70, 0x8a, 0xc7, 0x26, 0x86, 0xd1,
+	0x33, 0x30, 0xbc, 0x4f, 0x1c, 0x5e, 0x9e, 0x7b, 0x61, 0x95, 0x87, 0xc1, 0x4d, 0x8f, 0xe4, 0xb1,
+	0x4a, 0x06, 0xf5, 0x63, 0x05, 0xc6, 0xa3, 0xf6, 0x7a, 0x2a, 0x15, 0x06, 0xda, 0x86, 0x31, 0x27,
+	0x5c, 0xbc, 0x0a, 0x1f, 0xba, 0x7c, 0xac, 0x62, 0xb9, 0x3e, 0xd9, 0xeb, 0x56, 0xc7, 0xa2, 0x45,
+	0x70, 0x14, 0x56, 0xfd, 0x71, 0x0e, 0xca, 0x62, 0x3f, 0xa6, 0x66, 0xb4, 0x50, 0xa3, 0xaf, 0x42,
+	0x7c, 0x22, 0x93, 0x35, 0x05, 0xd5, 0x49, 0x82, 0xe3, 0x7c, 0x0d, 0xca, 0x2c, 0x99, 0x51, 0xc7,
+	0xcb, 0x08, 0x9e, 0x11, 0xcd, 0x0d, 0x74, 0x18, 0x21, 0x10, 0xdc, 0x2b, 0x02, 0x9a, 0x8b, 0xc3,
+	0x88, 0xe8, 0xb6, 0x6f, 0xa0, 0xf9, 0x4c, 0x79, 0x98, 0x6d, 0x35, 0x9b, 0x6d, 0x7e, 0xa8, 0x40,
+	0x25, 0x4d, 0x28, 0x12, 0x3a, 0x94, 0x93, 0x84, 0x8e, 0xdc, 0x83, 0x08, 0x1d, 0xbf, 0x56, 0x42,
+	0x47, 0xec, 0xba, 0xe8, 0x1d, 0x28, 0xb1, 0x3b, 0x2e, 0xef, 0x49, 0x78, 0x26, 0x7b, 0x25, 0xdb,
+	0x8d, 0xf8, 0xf5, 0xad, 0xaf, 0x13, 0x9d, 0xde, 0x20, 0x54, 0x0b, 0x2e, 0xb0, 0x01, 0x0d, 0xfb,
+	0xa8, 0x68, 0x0d, 0x0a, 0x6e, 0x9b, 0xe8, 0xd9, 0xb2, 0x0b, 0x5f, 0x54, 0xa3, 0x4d, 0xf4, 0xa0,
+	0x9a, 0x64, 0xbf, 0x30, 0x87, 0x50, 0xbf, 0x1f, 0xd6, 0xbf, 0xeb, 0x46, 0xf5, 0x9f, 0xa2, 0x55,
+	0xe5, 0x41, 0x68, 0xf5, 0x03, 0x3f, 0x68, 0xf1, 0x85, 0x5d, 0x37, 0x5c, 0x8a, 0xde, 0xea, 0xd3,
+	0x6c, 0x2d, 0x9b, 0x66, 0x99, 0x34, 0xd7, 0xab, 0xef, 0x45, 0x92, 0x12, 0xd2, 0xea, 0xab, 0x70,
+	0xc6, 0xa0, 0xa4, 0x25, 0xfd, 0xe7, 0x62, 0x06, 0xb5, 0x06, 0xc1, 0x65, 0x8d, 0x49, 0x62, 0x0f,
+	0x40, 0xfd, 0x6e, 0x2e, 0xb2, 0x76, 0xa6, 0x6e, 0xf4, 0x65, 0x18, 0x71, 0x45, 0x99, 0x27, 0x3d,
+	0x7f, 0x40, 0xc2, 0xf6, 0xab, 0xc6, 0x49, 0x31, 0xc9, 0x88, 0xa4, 0xb8, 0x38, 0xc0, 0x0a, 0xf9,
+	0x66, 0x2e, 0xa3, 0x6f, 0xc6, 0x8e, 0x39, 0xcd, 0x37, 0xd1, 0x75, 0x38, 0x4f, 0xee, 0x52, 0x62,
+	0x35, 0x49, 0x13, 0x0b, 0x1c, 0x5e, 0x1b, 0x7b, 0xe1, 0xbe, 0xd2, 0xeb, 0x56, 0xcf, 0xaf, 0x26,
+	0x8c, 0xe3, 0x44, 0x29, 0xd5, 0x84, 0xa4, 0xc3, 0x47, 0xb7, 0xa0, 0x68, 0xb7, 0xb5, 0x77, 0xfd,
+	0xf0, 0xbe, 0x90, 0xb6, 0xfc, 0xd7, 0x39, 0x57, 0x92, 0x71, 0x01, 0x5b, 0xbb, 0x37, 0x8c, 0x05,
+	0x98, 0xfa, 0x77, 0x05, 0x26, 0xe2, 0x81, 0xee, 0x18, 0xf1, 0x64, 0x1d, 0xc6, 0x5b, 0x1a, 0xd5,
+	0x77, 0xfd, 0x84, 0x29, 0x7a, 0xa6, 0x73, 0xbd, 0x6e, 0x75, 0xfc, 0x46, 0x64, 0xe4, 0xb0, 0x5b,
+	0x45, 0x57, 0x3b, 0xa6, 0x79, 0x10, 0xbd, 0xce, 0xc4, 0xe4, 0xd1, 0x9b, 0x30, 0xd9, 0x34, 0x5c,
+	0x6a, 0x58, 0x3a, 0x0d, 0x40, 0xbd, 0x26, 0xeb, 0xb3, 0xac, 0x60, 0x5e, 0x89, 0x0f, 0xa6, 0xe0,
+	0xf6, 0xa3, 0xa8, 0x3f, 0xca, 0xf9, 0x3e, 0xdc, 0x77, 0x01, 0x42, 0x8b, 0x00, 0xba, 0x7f, 0xe3,
+	0x8d, 0xb7, 0xc7, 0x82, 0xbb, 0x30, 0x0e, 0x71, 0x21, 0xb3, 0xef, 0x36, 0xfd, 0xc5, 0xe3, 0x5e,
+	0xbc, 0x1e, 0x9a, 0xbb, 0xf5, 0x3f, 0x15, 0x18, 0x8b, 0x64, 0xd2, 0x0c, 0x57, 0xec, 0x37, 0x60,
+	0x98, 0xdc, 0xd5, 0x74, 0x6a, 0xca, 0xb2, 0xe0, 0x99, 0xb4, 0x09, 0x57, 0x19, 0x5b, 0x34, 0x51,
+	0xf3, 0x06, 0xe0, 0xaa, 0x27, 0x8e, 0x25, 0x0e, 0xda, 0x85, 0xf1, 0x6d, 0xc3, 0x71, 0xe9, 0xd2,
+	0xbe, 0x66, 0x98, 0xda, 0x96, 0x49, 0x44, 0x26, 0x1d, 0x90, 0xa5, 0x1b, 0x9d, 0x2d, 0x89, 0x3b,
+	0x25, 0x16, 0x3a, 0x7e, 0x35, 0x82, 0x83, 0x63, 0xb8, 0xea, 0x1f, 0x8b, 0xb2, 0xa6, 0x4f, 0x29,
+	0x44, 0xd1, 0xd3, 0xac, 0xa0, 0xe5, 0x43, 0x42, 0x07, 0xa1, 0xca, 0x94, 0x93, 0xb1, 0x1c, 0x0f,
+	0x7d, 0x59, 0xc8, 0x65, 0xfa, 0xb2, 0x90, 0xcf, 0xf0, 0x65, 0xa1, 0x70, 0xe4, 0x97, 0x85, 0x05,
+	0x28, 0x6b, 0xcd, 0x96, 0x61, 0x2d, 0xe9, 0x3a, 0x71, 0x5d, 0x5e, 0x30, 0x8a, 0xbb, 0xe8, 0x52,
+	0x40, 0xc6, 0x61, 0x1e, 0x56, 0xfe, 0x50, 0xdb, 0x24, 0x8e, 0xb8, 0xdf, 0x15, 0xb3, 0x28, 0x76,
+	0xc3, 0x17, 0x08, 0xca, 0x9f, 0x80, 0xe6, 0xe2, 0x30, 0x62, 0xf2, 0x65, 0x77, 0xf8, 0x3e, 0x5e,
+	0x76, 0x4b, 0x9f, 0xe9, 0xb2, 0xfb, 0x5a, 0xf0, 0x31, 0x66, 0x84, 0xeb, 0xf6, 0x4a, 0xe8, 0x63,
+	0xcc, 0x61, 0xb7, 0xfa, 0x78, 0xda, 0x07, 0x27, 0x7a, 0xd0, 0x26, 0x6e, 0xed, 0x56, 0xf8, 0x8b,
+	0xcd, 0xfb, 0x8a, 0xdf, 0x7c, 0x69, 0xca, 0x9a, 0x97, 0xdf, 0xeb, 0xcb, 0x8b, 0xd7, 0x4e, 0x74,
+	0xed, 0xa9, 0x2d, 0xc7, 0xd0, 0xbc, 0x80, 0xf0, 0x74, 0xac, 0x2f, 0xd3, 0x4c, 0x6f, 0x0c, 0xf5,
+	0xad, 0x67, 0xda, 0x85, 0x0b, 0x89, 0xa8, 0xa7, 0xda, 0xf3, 0xdc, 0x94, 0x17, 0x13, 0xbf, 0x5b,
+	0xb3, 0x02, 0x79, 0x9d, 0x98, 0x22, 0x6f, 0xa5, 0x7e, 0x23, 0xea, 0xfb, 0x62, 0xe1, 0xb5, 0xa6,
+	0x97, 0x57, 0xaf, 0x63, 0x26, 0xae, 0x7e, 0xab, 0x20, 0x33, 0x55, 0xe0, 0xec, 0x19, 0x62, 0xd4,
+	0x12, 0x9c, 0x6d, 0x06, 0x09, 0x9d, 0xe7, 0x65, 0xcf, 0x45, 0x1f, 0x11, 0xcc, 0xe1, 0x0a, 0x84,
+	0xcb, 0xc5, 0xf9, 0xa3, 0x25, 0x49, 0xfe, 0x3e, 0x96, 0x24, 0x9b, 0x30, 0x1e, 0x7c, 0xbe, 0xb9,
+	0x61, 0x37, 0xa5, 0xcf, 0xd7, 0x64, 0x08, 0x5b, 0x8a, 0x8c, 0x1e, 0x76, 0xab, 0xe7, 0xe3, 0x37,
+	0x5b, 0x46, 0xc7, 0x31, 0x14, 0x74, 0x11, 0xce, 0xf0, 0xac, 0xc1, 0xa3, 0x42, 0x3e, 0x28, 0xbe,
+	0x78, 0xd8, 0xc7, 0xde, 0xd8, 0xe9, 0x47, 0x83, 0xcd, 0x50, 0x2f, 0x74, 0x98, 0x9f, 0xfd, 0xa5,
+	0xe3, 0x34, 0xf9, 0xbd, 0x9a, 0xc3, 0x1f, 0xf1, 0xb1, 0xd4, 0x7f, 0xf9, 0xf7, 0x08, 0xde, 0x9e,
+	0x43, 0x8f, 0x85, 0x8c, 0xb9, 0x5e, 0x16, 0xcb, 0xca, 0x5f, 0x23, 0x07, 0x9e, 0x65, 0x5f, 0x0c,
+	0x5b, 0xf6, 0x48, 0xca, 0x35, 0xf7, 0x25, 0x28, 0x92, 0xed, 0x6d, 0xa2, 0x53, 0x11, 0x99, 0x65,
+	0xe3, 0xb7, 0xb8, 0xca, 0xa9, 0x87, 0xac, 0xf0, 0x08, 0xa6, 0xf4, 0x88, 0x58, 0x88, 0x30, 0xfb,
+	0xa0, 0x46, 0x8b, 0x2c, 0x35, 0x9b, 0xa4, 0x29, 0x3e, 0x26, 0x1d, 0xe7, 0xdb, 0x1e, 0x6f, 0x1a,
+	0x6c, 0x48, 0x00, 0x1c, 0x60, 0xbd, 0x58, 0xfa, 0xc1, 0x4f, 0xaa, 0x43, 0xef, 0xfd, 0x79, 0x76,
+	0x48, 0x7d, 0x3f, 0x27, 0x8d, 0x3f, 0x50, 0xf7, 0xa0, 0x8d, 0xbf, 0x0a, 0x25, 0xbb, 0xcd, 0x78,
+	0x6d, 0x99, 0x95, 0x2e, 0xc9, 0xea, 0xe2, 0x75, 0x41, 0x3f, 0xec, 0x56, 0x2b, 0x71, 0x58, 0x39,
+	0x86, 0x7d, 0xe9, 0x40, 0x85, 0xf9, 0x4c, 0x2a, 0x2c, 0x1c, 0x5f, 0x85, 0xcb, 0x30, 0x19, 0x98,
+	0x4e, 0x83, 0xe8, 0xb6, 0xd5, 0x74, 0x85, 0xf5, 0xf2, 0xcc, 0xb1, 0x11, 0x1f, 0xc4, 0xfd, 0xfc,
+	0xea, 0x0f, 0x0b, 0x80, 0xfa, 0x0b, 0x8d, 0xa4, 0x08, 0xa0, 0x7c, 0x96, 0x08, 0x90, 0x3b, 0xd5,
+	0x08, 0x90, 0xbf, 0xbf, 0x11, 0xa0, 0x70, 0x44, 0x04, 0x78, 0x18, 0x4b, 0x88, 0xd3, 0x0a, 0x1a,
+	0x3f, 0x57, 0x60, 0xb2, 0xef, 0x15, 0x02, 0x7a, 0x09, 0xc6, 0x0c, 0x56, 0x08, 0x6f, 0x6b, 0xe2,
+	0xca, 0xe6, 0x19, 0xc6, 0x05, 0xb1, 0xcc, 0xb1, 0xb5, 0xf0, 0x20, 0x8e, 0xf2, 0xa2, 0x47, 0x21,
+	0x6f, 0xb4, 0x65, 0xaf, 0x96, 0xe7, 0xaa, 0xb5, 0x75, 0x17, 0x33, 0x1a, 0x33, 0xb9, 0x5d, 0xcd,
+	0x69, 0xde, 0xd1, 0x1c, 0xe6, 0xc9, 0x0e, 0xd3, 0x6e, 0x3e, 0x6a, 0x72, 0xaf, 0x46, 0x87, 0x71,
+	0x9c, 0x5f, 0xfd, 0xa9, 0x02, 0x8f, 0xa6, 0x5e, 0xe5, 0x32, 0xbf, 0x64, 0xd1, 0x00, 0xda, 0x9a,
+	0xa3, 0xb5, 0x88, 0xb8, 0xa3, 0x9c, 0xe0, 0xe5, 0x87, 0x7f, 0x09, 0x5a, 0xf7, 0x81, 0x70, 0x08,
+	0x54, 0xfd, 0x5e, 0x0e, 0xc6, 0xe4, 0x05, 0xd6, 0xeb, 0xdd, 0x9d, 0x7e, 0x63, 0xe7, 0x5a, 0xa4,
+	0xb1, 0x93, 0x5a, 0x52, 0x44, 0x96, 0x95, 0xd6, 0xda, 0x41, 0x0d, 0x28, 0xba, 0xfc, 0x7d, 0xd0,
+	0xa0, 0x0e, 0x7a, 0x14, 0x8e, 0x8b, 0x04, 0x8a, 0xf7, 0x7e, 0x63, 0x01, 0xa5, 0xf6, 0x14, 0x98,
+	0x89, 0xf0, 0x8b, 0x42, 0xcc, 0xc1, 0x64, 0x9b, 0x38, 0xc4, 0xd2, 0x09, 0xba, 0x04, 0x25, 0xad,
+	0x6d, 0xbc, 0xe2, 0xd8, 0x9d, 0xb6, 0x38, 0x45, 0xff, 0xf6, 0xb7, 0xb4, 0xbe, 0xc6, 0xe9, 0xd8,
+	0xe7, 0x60, 0xdc, 0x72, 0x2d, 0xc2, 0x96, 0x42, 0x9d, 0x4e, 0x8f, 0x8e, 0x7d, 0x0e, 0xbf, 0x2e,
+	0x2a, 0xa4, 0xd6, 0x45, 0x75, 0xc8, 0x77, 0x8c, 0xa6, 0x68, 0x34, 0x5f, 0x91, 0xc9, 0xe3, 0x56,
+	0xd6, 0x42, 0x98, 0x09, 0xab, 0xbf, 0x55, 0x60, 0x32, 0xb2, 0xc9, 0x07, 0xd0, 0x7d, 0x7a, 0x2d,
+	0xda, 0x7d, 0x7a, 0x22, 0xd3, 0x61, 0xa5, 0xf4, 0x9f, 0xf4, 0xd8, 0xf2, 0x79, 0x03, 0xea, 0x66,
+	0xfc, 0x99, 0xd1, 0xc5, 0x0c, 0x4d, 0xdc, 0xf4, 0xb7, 0x45, 0xea, 0xaf, 0x72, 0x70, 0x2e, 0xc1,
+	0x72, 0xd0, 0x6d, 0x80, 0x20, 0x68, 0x8b, 0xa9, 0x52, 0x23, 0x69, 0xdf, 0x47, 0x12, 0xfe, 0xf2,
+	0x24, 0x44, 0x0d, 0x61, 0xa1, 0x16, 0x94, 0x1d, 0xe2, 0x12, 0x67, 0x9f, 0x34, 0xaf, 0xf2, 0xdc,
+	0xcf, 0x14, 0xf5, 0x7c, 0x26, 0x45, 0xf5, 0x59, 0x69, 0x10, 0xb2, 0x71, 0x00, 0x89, 0xc3, 0xf8,
+	0xe8, 0x76, 0xa0, 0x30, 0xef, 0xeb, 0xf3, 0xe5, 0x01, 0xbb, 0x88, 0xbe, 0xca, 0x3b, 0x42, 0x75,
+	0x7f, 0x50, 0xe0, 0x42, 0x64, 0x79, 0x1b, 0xa4, 0xd5, 0x36, 0x35, 0x4a, 0x1e, 0x40, 0x88, 0x69,
+	0x44, 0x42, 0xcc, 0x42, 0x26, 0xed, 0xc9, 0xe5, 0xa5, 0x76, 0x91, 0x3f, 0x56, 0xe0, 0xd1, 0x44,
+	0x89, 0x07, 0xe0, 0x38, 0x38, 0xea, 0x38, 0x97, 0x8f, 0xb5, 0xa3, 0x14, 0x07, 0xfa, 0x7d, 0xda,
+	0x7e, 0xb8, 0x27, 0xfd, 0x6f, 0xe5, 0x01, 0xf5, 0x17, 0x0a, 0x8c, 0x4a, 0xce, 0x75, 0xdb, 0x36,
+	0x33, 0x5c, 0x2e, 0x17, 0x01, 0xc4, 0xeb, 0x53, 0xf9, 0x15, 0x25, 0x1f, 0xac, 0xf8, 0x15, 0x7f,
+	0x04, 0x87, 0xb8, 0xd0, 0x6b, 0x80, 0xe4, 0xda, 0x1a, 0xa6, 0xec, 0x09, 0xf2, 0x90, 0x9e, 0xaf,
+	0x4f, 0x0b, 0x59, 0x84, 0xfb, 0x38, 0x70, 0x82, 0x94, 0xfa, 0x3b, 0x25, 0xc8, 0xbd, 0x9c, 0xfc,
+	0xf0, 0xe9, 0x9c, 0x2f, 0x2b, 0x55, 0xe7, 0xe1, 0x0c, 0xc2, 0x39, 0x1f, 0xc2, 0x0c, 0xc2, 0xd7,
+	0x95, 0xe2, 0x00, 0xbf, 0x2c, 0xc4, 0xd6, 0xcf, 0x0d, 0x3f, 0x6b, 0x75, 0x76, 0x35, 0xf4, 0xce,
+	0xb8, 0xbc, 0xf8, 0xb9, 0x41, 0x0b, 0x61, 0x46, 0x99, 0xd8, 0x33, 0x0c, 0x3f, 0xc8, 0xc9, 0x1f,
+	0xeb, 0x41, 0x4e, 0xe1, 0x14, 0x1e, 0xe4, 0x9c, 0x39, 0xf2, 0x41, 0xce, 0x5a, 0x90, 0x2d, 0xbc,
+	0xdb, 0xc3, 0xcc, 0xd1, 0xe9, 0xf5, 0x88, 0x57, 0xbb, 0x18, 0xa6, 0xda, 0xc4, 0xf1, 0xc8, 0xc1,
+	0xda, 0x98, 0x27, 0x7a, 0x6f, 0x82, 0xa6, 0x7b, 0xdd, 0xea, 0xd4, 0x7a, 0x22, 0x07, 0x4e, 0x91,
+	0x44, 0x5b, 0x30, 0xce, 0x5b, 0x7c, 0x4d, 0xff, 0x45, 0x95, 0xf7, 0x6e, 0x48, 0x1d, 0xfc, 0x4c,
+	0x2e, 0xe8, 0x3c, 0x37, 0x22, 0x08, 0x38, 0x86, 0x58, 0x7f, 0xf9, 0xa3, 0x7b, 0x33, 0x43, 0x9f,
+	0xdc, 0x9b, 0x19, 0xfa, 0xf4, 0xde, 0xcc, 0xd0, 0x7b, 0xbd, 0x19, 0xe5, 0xa3, 0xde, 0x8c, 0xf2,
+	0x49, 0x6f, 0x46, 0xf9, 0xb4, 0x37, 0xa3, 0xfc, 0xb5, 0x37, 0xa3, 0x7c, 0xe7, 0x6f, 0x33, 0x43,
+	0x5f, 0x99, 0x4a, 0xfe, 0x77, 0x81, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x0c, 0xec, 0x16,
+	0x47, 0x30, 0x00, 0x00,
+}
+
+func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ShareID != nil {
+		i -= len(*m.ShareID)
+		copy(dAtA[i:], *m.ShareID)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.NetworkData != nil {
+		{
+			size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.Data != nil {
+		{
+			size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Device)
+	copy(dAtA[i:], m.Device)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Pool)
+	copy(dAtA[i:], m.Pool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AllocationResult) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AllocationTimestamp != nil {
+		{
+			size, err := m.AllocationTimestamp.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.NodeSelector != nil {
+		{
+			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	{
+		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Expression)
+	copy(dAtA[i:], m.Expression)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CapacityRequestPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CapacityRequestPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CapacityRequestPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ValidRange != nil {
+		{
+			size, err := m.ValidRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.ValidValues) > 0 {
+		for iNdEx := len(m.ValidValues) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ValidValues[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Default != nil {
+		{
+			size, err := m.Default.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CapacityRequestPolicyRange) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CapacityRequestPolicyRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CapacityRequestPolicyRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Step != nil {
+		{
+			size, err := m.Step.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Max != nil {
+		{
+			size, err := m.Max.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Min != nil {
+		{
+			size, err := m.Min.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CapacityRequirements) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CapacityRequirements) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Requests) > 0 {
+		keysForRequests := make([]string, 0, len(m.Requests))
+		for k := range m.Requests {
+			keysForRequests = append(keysForRequests, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+		for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Requests[QualifiedName(keysForRequests[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForRequests[iNdEx])
+			copy(dAtA[i:], keysForRequests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Counter) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Counter) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CounterSet) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CounterSet) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Counters) > 0 {
+		keysForCounters := make([]string, 0, len(m.Counters))
+		for k := range m.Counters {
+			keysForCounters = append(keysForCounters, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+		for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Counters[string(keysForCounters[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCounters[iNdEx])
+			copy(dAtA[i:], keysForCounters[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Device) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Device) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AllowMultipleAllocations != nil {
+		i--
+		if *m.AllowMultipleAllocations {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x60
+	}
+	if len(m.BindingFailureConditions) > 0 {
+		for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.BindingFailureConditions[iNdEx])
+			copy(dAtA[i:], m.BindingFailureConditions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx])))
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if len(m.BindingConditions) > 0 {
+		for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.BindingConditions[iNdEx])
+			copy(dAtA[i:], m.BindingConditions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx])))
+			i--
+			dAtA[i] = 0x52
+		}
+	}
+	if m.BindsToNode != nil {
+		i--
+		if *m.BindsToNode {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	if len(m.Taints) > 0 {
+		for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.AllNodes != nil {
+		i--
+		if *m.AllNodes {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.NodeSelector != nil {
+		{
+			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.NodeName != nil {
+		i -= len(*m.NodeName)
+		copy(dAtA[i:], *m.NodeName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.ConsumesCounters) > 0 {
+		for iNdEx := len(m.ConsumesCounters) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ConsumesCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Capacity) > 0 {
+		keysForCapacity := make([]string, 0, len(m.Capacity))
+		for k := range m.Capacity {
+			keysForCapacity = append(keysForCapacity, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCapacity[iNdEx])
+			copy(dAtA[i:], keysForCapacity[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Attributes) > 0 {
+		keysForAttributes := make([]string, 0, len(m.Attributes))
+		for k := range m.Attributes {
+			keysForAttributes = append(keysForAttributes, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+		for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAttributes[iNdEx])
+			copy(dAtA[i:], keysForAttributes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
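+// Note: the Marshal/MarshalTo/MarshalToSizedBuffer trios below follow the standard
+// gogo/protobuf pattern: Marshal allocates a buffer of exactly Size() bytes and
+// MarshalToSizedBuffer fills it back to front, writing each field's value first and
+// then its tag byte (field number << 3 | wire type).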
+func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Requests) > 0 {
+		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Requests[iNdEx])
+			copy(dAtA[i:], m.Requests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Source)
+	copy(dAtA[i:], m.Source)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Config) > 0 {
+		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Results) > 0 {
+		for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.VersionValue != nil {
+		i -= len(*m.VersionValue)
+		copy(dAtA[i:], *m.VersionValue)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.StringValue != nil {
+		i -= len(*m.StringValue)
+		copy(dAtA[i:], *m.StringValue)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.BoolValue != nil {
+		i--
+		if *m.BoolValue {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.IntValue != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue))
+		i--
+		dAtA[i] = 0x10
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RequestPolicy != nil {
+		{
+			size, err := m.RequestPolicy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.Value.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClaim) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Config) > 0 {
+		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Constraints) > 0 {
+		for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Requests) > 0 {
+		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.Requests) > 0 {
+		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Requests[iNdEx])
+			copy(dAtA[i:], m.Requests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClass) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ExtendedResourceName != nil {
+		i -= len(*m.ExtendedResourceName)
+		copy(dAtA[i:], *m.ExtendedResourceName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ExtendedResourceName)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Config) > 0 {
+		for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Opaque != nil {
+		{
+			size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.DistinctAttribute != nil {
+		i -= len(*m.DistinctAttribute)
+		copy(dAtA[i:], *m.DistinctAttribute)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DistinctAttribute)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.MatchAttribute != nil {
+		i -= len(*m.MatchAttribute)
+		copy(dAtA[i:], *m.MatchAttribute)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Requests) > 0 {
+		for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Requests[iNdEx])
+			copy(dAtA[i:], m.Requests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceCounterConsumption) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceCounterConsumption) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Counters) > 0 {
+		keysForCounters := make([]string, 0, len(m.Counters))
+		for k := range m.Counters {
+			keysForCounters = append(keysForCounters, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+		for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Counters[string(keysForCounters[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCounters[iNdEx])
+			copy(dAtA[i:], keysForCounters[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCounters[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.CounterSet)
+	copy(dAtA[i:], m.CounterSet)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CounterSet)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.FirstAvailable) > 0 {
+		for iNdEx := len(m.FirstAvailable) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.FirstAvailable[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Exactly != nil {
+		{
+			size, err := m.Exactly.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ConsumedCapacity) > 0 {
+		keysForConsumedCapacity := make([]string, 0, len(m.ConsumedCapacity))
+		for k := range m.ConsumedCapacity {
+			keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+		for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForConsumedCapacity[iNdEx])
+			copy(dAtA[i:], keysForConsumedCapacity[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForConsumedCapacity[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x52
+		}
+	}
+	if m.ShareID != nil {
+		i -= len(*m.ShareID)
+		copy(dAtA[i:], *m.ShareID)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ShareID)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.BindingFailureConditions) > 0 {
+		for iNdEx := len(m.BindingFailureConditions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.BindingFailureConditions[iNdEx])
+			copy(dAtA[i:], m.BindingFailureConditions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingFailureConditions[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.BindingConditions) > 0 {
+		for iNdEx := len(m.BindingConditions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.BindingConditions[iNdEx])
+			copy(dAtA[i:], m.BindingConditions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingConditions[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Tolerations) > 0 {
+		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.AdminAccess != nil {
+		i--
+		if *m.AdminAccess {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.Device)
+	copy(dAtA[i:], m.Device)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Pool)
+	copy(dAtA[i:], m.Pool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Request)
+	copy(dAtA[i:], m.Request)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.CEL != nil {
+		{
+			size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceSubRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceSubRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceSubRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Capacity != nil {
+		{
+			size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if len(m.Tolerations) > 0 {
+		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
+	i--
+	dAtA[i] = 0x28
+	i -= len(m.AllocationMode)
+	copy(dAtA[i:], m.AllocationMode)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
+	i--
+	dAtA[i] = 0x22
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.DeviceClassName)
+	copy(dAtA[i:], m.DeviceClassName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceTaint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceTaint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceTaint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TimeAdded != nil {
+		{
+			size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Effect)
+	copy(dAtA[i:], m.Effect)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DeviceToleration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeviceToleration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceToleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TolerationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds))
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.Effect)
+	copy(dAtA[i:], m.Effect)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ExactDeviceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExactDeviceRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExactDeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Capacity != nil {
+		{
+			size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if len(m.Tolerations) > 0 {
+		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.AdminAccess != nil {
+		i--
+		if *m.AdminAccess {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.AllocationMode)
+	copy(dAtA[i:], m.AllocationMode)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.DeviceClassName)
+	copy(dAtA[i:], m.DeviceClassName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.HardwareAddress)
+	copy(dAtA[i:], m.HardwareAddress)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.IPs) > 0 {
+		for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.IPs[iNdEx])
+			copy(dAtA[i:], m.IPs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.InterfaceName)
+	copy(dAtA[i:], m.InterfaceName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.APIGroup)
+	copy(dAtA[i:], m.APIGroup)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Devices) > 0 {
+		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.ReservedFor) > 0 {
+		for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Allocation != nil {
+		{
+			size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourcePool) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceSlice) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.SharedCounters) > 0 {
+		for iNdEx := len(m.SharedCounters) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.SharedCounters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.PerDeviceNodeSelection != nil {
+		i--
+		if *m.PerDeviceNodeSelection {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if len(m.Devices) > 0 {
+		for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.AllNodes != nil {
+		i--
+		if *m.AllNodes {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.NodeSelector != nil {
+		{
+			size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.NodeName != nil {
+		i -= len(*m.NodeName)
+		copy(dAtA[i:], *m.NodeName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	{
+		size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
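+// encodeVarintGenerated writes v as a protobuf varint into the bytes immediately
+// before offset and returns the index of the varint's first byte.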
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *AllocatedDeviceStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Pool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Device)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Data != nil {
+		l = m.Data.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NetworkData != nil {
+		l = m.NetworkData.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ShareID != nil {
+		l = len(*m.ShareID)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *AllocationResult) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Devices.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.NodeSelector != nil {
+		l = m.NodeSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AllocationTimestamp != nil {
+		l = m.AllocationTimestamp.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *CELDeviceSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Expression)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CapacityRequestPolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Default != nil {
+		l = m.Default.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ValidValues) > 0 {
+		for _, e := range m.ValidValues {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ValidRange != nil {
+		l = m.ValidRange.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *CapacityRequestPolicyRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Min != nil {
+		l = m.Min.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Max != nil {
+		l = m.Max.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Step != nil {
+		l = m.Step.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *CapacityRequirements) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Requests) > 0 {
+		for k, v := range m.Requests {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *Counter) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Value.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CounterSet) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Counters) > 0 {
+		for k, v := range m.Counters {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *Device) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Attributes) > 0 {
+		for k, v := range m.Attributes {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Capacity) > 0 {
+		for k, v := range m.Capacity {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.ConsumesCounters) > 0 {
+		for _, e := range m.ConsumesCounters {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.NodeName != nil {
+		l = len(*m.NodeName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeSelector != nil {
+		l = m.NodeSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AllNodes != nil {
+		n += 2
+	}
+	if len(m.Taints) > 0 {
+		for _, e := range m.Taints {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.BindsToNode != nil {
+		n += 2
+	}
+	if len(m.BindingConditions) > 0 {
+		for _, s := range m.BindingConditions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.BindingFailureConditions) > 0 {
+		for _, s := range m.BindingFailureConditions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.AllowMultipleAllocations != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *DeviceAllocationConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Source)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Requests) > 0 {
+		for _, s := range m.Requests {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.DeviceConfiguration.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeviceAllocationResult) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Results) > 0 {
+		for _, e := range m.Results {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Config) > 0 {
+		for _, e := range m.Config {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeviceAttribute) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.IntValue != nil {
+		n += 1 + sovGenerated(uint64(*m.IntValue))
+	}
+	if m.BoolValue != nil {
+		n += 2
+	}
+	if m.StringValue != nil {
+		l = len(*m.StringValue)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.VersionValue != nil {
+		l = len(*m.VersionValue)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceCapacity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Value.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RequestPolicy != nil {
+		l = m.RequestPolicy.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceClaim) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Requests) > 0 {
+		for _, e := range m.Requests {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Constraints) > 0 {
+		for _, e := range m.Constraints {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Config) > 0 {
+		for _, e := range m.Config {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeviceClaimConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Requests) > 0 {
+		for _, s := range m.Requests {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.DeviceConfiguration.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeviceClass) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeviceClassConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.DeviceConfiguration.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeviceClassList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeviceClassSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Config) > 0 {
+		for _, e := range m.Config {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ExtendedResourceName != nil {
+		l = len(*m.ExtendedResourceName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Opaque != nil {
+		l = m.Opaque.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceConstraint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Requests) > 0 {
+		for _, s := range m.Requests {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.MatchAttribute != nil {
+		l = len(*m.MatchAttribute)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DistinctAttribute != nil {
+		l = len(*m.DistinctAttribute)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceCounterConsumption) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.CounterSet)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Counters) > 0 {
+		for k, v := range m.Counters {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *DeviceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Exactly != nil {
+		l = m.Exactly.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.FirstAvailable) > 0 {
+		for _, e := range m.FirstAvailable {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeviceRequestAllocationResult) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Request)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Pool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Device)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.AdminAccess != nil {
+		n += 2
+	}
+	if len(m.Tolerations) > 0 {
+		for _, e := range m.Tolerations {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.BindingConditions) > 0 {
+		for _, s := range m.BindingConditions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.BindingFailureConditions) > 0 {
+		for _, s := range m.BindingFailureConditions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ShareID != nil {
+		l = len(*m.ShareID)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ConsumedCapacity) > 0 {
+		for k, v := range m.ConsumedCapacity {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *DeviceSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CEL != nil {
+		l = m.CEL.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceSubRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DeviceClassName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.AllocationMode)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Count))
+	if len(m.Tolerations) > 0 {
+		for _, e := range m.Tolerations {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Capacity != nil {
+		l = m.Capacity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceTaint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Effect)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TimeAdded != nil {
+		l = m.TimeAdded.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DeviceToleration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Effect)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TolerationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TolerationSeconds))
+	}
+	return n
+}
+
+func (m *ExactDeviceRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.DeviceClassName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.AllocationMode)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Count))
+	if m.AdminAccess != nil {
+		n += 2
+	}
+	if len(m.Tolerations) > 0 {
+		for _, e := range m.Tolerations {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Capacity != nil {
+		l = m.Capacity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NetworkDeviceData) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.InterfaceName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.IPs) > 0 {
+		for _, s := range m.IPs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.HardwareAddress)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *OpaqueDeviceConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Parameters.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceClaim) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceClaimConsumerReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.APIGroup)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceClaimList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceClaimSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Devices.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceClaimStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Allocation != nil {
+		l = m.Allocation.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ReservedFor) > 0 {
+		for _, e := range m.ReservedFor {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Devices) > 0 {
+		for _, e := range m.Devices {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceClaimTemplate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceClaimTemplateList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceClaimTemplateSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourcePool) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Generation))
+	n += 1 + sovGenerated(uint64(m.ResourceSliceCount))
+	return n
+}
+
+func (m *ResourceSlice) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceSliceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceSliceSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Pool.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.NodeName != nil {
+		l = len(*m.NodeName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeSelector != nil {
+		l = m.NodeSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AllNodes != nil {
+		n += 2
+	}
+	if len(m.Devices) > 0 {
+		for _, e := range m.Devices {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.PerDeviceNodeSelection != nil {
+		n += 2
+	}
+	if len(m.SharedCounters) > 0 {
+		for _, e := range m.SharedCounters {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AllocatedDeviceStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]Condition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&AllocatedDeviceStatus{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
+		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1) + `,`,
+		`NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`,
+		`ShareID:` + valueToStringGenerated(this.ShareID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AllocationResult) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AllocationResult{`,
+		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`,
+		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+		`AllocationTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.AllocationTimestamp), "Time", "v1.Time", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CELDeviceSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CELDeviceSelector{`,
+		`Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CapacityRequestPolicy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForValidValues := "[]Quantity{"
+	for _, f := range this.ValidValues {
+		repeatedStringForValidValues += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForValidValues += "}"
+	s := strings.Join([]string{`&CapacityRequestPolicy{`,
+		`Default:` + strings.Replace(fmt.Sprintf("%v", this.Default), "Quantity", "resource.Quantity", 1) + `,`,
+		`ValidValues:` + repeatedStringForValidValues + `,`,
+		`ValidRange:` + strings.Replace(this.ValidRange.String(), "CapacityRequestPolicyRange", "CapacityRequestPolicyRange", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CapacityRequestPolicyRange) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CapacityRequestPolicyRange{`,
+		`Min:` + strings.Replace(fmt.Sprintf("%v", this.Min), "Quantity", "resource.Quantity", 1) + `,`,
+		`Max:` + strings.Replace(fmt.Sprintf("%v", this.Max), "Quantity", "resource.Quantity", 1) + `,`,
+		`Step:` + strings.Replace(fmt.Sprintf("%v", this.Step), "Quantity", "resource.Quantity", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CapacityRequirements) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForRequests := make([]string, 0, len(this.Requests))
+	for k := range this.Requests {
+		keysForRequests = append(keysForRequests, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+	mapStringForRequests := "map[QualifiedName]resource.Quantity{"
+	for _, k := range keysForRequests {
+		mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)])
+	}
+	mapStringForRequests += "}"
+	s := strings.Join([]string{`&CapacityRequirements{`,
+		`Requests:` + mapStringForRequests + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Counter) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Counter{`,
+		`Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CounterSet) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForCounters := make([]string, 0, len(this.Counters))
+	for k := range this.Counters {
+		keysForCounters = append(keysForCounters, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+	mapStringForCounters := "map[string]Counter{"
+	for _, k := range keysForCounters {
+		mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
+	}
+	mapStringForCounters += "}"
+	s := strings.Join([]string{`&CounterSet{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Counters:` + mapStringForCounters + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Device) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConsumesCounters := "[]DeviceCounterConsumption{"
+	for _, f := range this.ConsumesCounters {
+		repeatedStringForConsumesCounters += strings.Replace(strings.Replace(f.String(), "DeviceCounterConsumption", "DeviceCounterConsumption", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConsumesCounters += "}"
+	repeatedStringForTaints := "[]DeviceTaint{"
+	for _, f := range this.Taints {
+		repeatedStringForTaints += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForTaints += "}"
+	keysForAttributes := make([]string, 0, len(this.Attributes))
+	for k := range this.Attributes {
+		keysForAttributes = append(keysForAttributes, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+	mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
+	for _, k := range keysForAttributes {
+		mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
+	}
+	mapStringForAttributes += "}"
+	keysForCapacity := make([]string, 0, len(this.Capacity))
+	for k := range this.Capacity {
+		keysForCapacity = append(keysForCapacity, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+	mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
+	for _, k := range keysForCapacity {
+		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
+	}
+	mapStringForCapacity += "}"
+	s := strings.Join([]string{`&Device{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Attributes:` + mapStringForAttributes + `,`,
+		`Capacity:` + mapStringForCapacity + `,`,
+		`ConsumesCounters:` + repeatedStringForConsumesCounters + `,`,
+		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+		`AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
+		`Taints:` + repeatedStringForTaints + `,`,
+		`BindsToNode:` + valueToStringGenerated(this.BindsToNode) + `,`,
+		`BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`,
+		`BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`,
+		`AllowMultipleAllocations:` + valueToStringGenerated(this.AllowMultipleAllocations) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceAllocationConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceAllocationConfiguration{`,
+		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceAllocationResult) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForResults := "[]DeviceRequestAllocationResult{"
+	for _, f := range this.Results {
+		repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResults += "}"
+	repeatedStringForConfig := "[]DeviceAllocationConfiguration{"
+	for _, f := range this.Config {
+		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConfig += "}"
+	s := strings.Join([]string{`&DeviceAllocationResult{`,
+		`Results:` + repeatedStringForResults + `,`,
+		`Config:` + repeatedStringForConfig + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceAttribute) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceAttribute{`,
+		`IntValue:` + valueToStringGenerated(this.IntValue) + `,`,
+		`BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`,
+		`StringValue:` + valueToStringGenerated(this.StringValue) + `,`,
+		`VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceCapacity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceCapacity{`,
+		`Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+		`RequestPolicy:` + strings.Replace(this.RequestPolicy.String(), "CapacityRequestPolicy", "CapacityRequestPolicy", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClaim) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRequests := "[]DeviceRequest{"
+	for _, f := range this.Requests {
+		repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRequests += "}"
+	repeatedStringForConstraints := "[]DeviceConstraint{"
+	for _, f := range this.Constraints {
+		repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConstraints += "}"
+	repeatedStringForConfig := "[]DeviceClaimConfiguration{"
+	for _, f := range this.Config {
+		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConfig += "}"
+	s := strings.Join([]string{`&DeviceClaim{`,
+		`Requests:` + repeatedStringForRequests + `,`,
+		`Constraints:` + repeatedStringForConstraints + `,`,
+		`Config:` + repeatedStringForConfig + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClaimConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceClaimConfiguration{`,
+		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClass) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceClass{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClassConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceClassConfiguration{`,
+		`DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClassList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]DeviceClass{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&DeviceClassList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceClassSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSelectors := "[]DeviceSelector{"
+	for _, f := range this.Selectors {
+		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSelectors += "}"
+	repeatedStringForConfig := "[]DeviceClassConfiguration{"
+	for _, f := range this.Config {
+		repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConfig += "}"
+	s := strings.Join([]string{`&DeviceClassSpec{`,
+		`Selectors:` + repeatedStringForSelectors + `,`,
+		`Config:` + repeatedStringForConfig + `,`,
+		`ExtendedResourceName:` + valueToStringGenerated(this.ExtendedResourceName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceConfiguration{`,
+		`Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceConstraint) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceConstraint{`,
+		`Requests:` + fmt.Sprintf("%v", this.Requests) + `,`,
+		`MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`,
+		`DistinctAttribute:` + valueToStringGenerated(this.DistinctAttribute) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceCounterConsumption) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForCounters := make([]string, 0, len(this.Counters))
+	for k := range this.Counters {
+		keysForCounters = append(keysForCounters, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+	mapStringForCounters := "map[string]Counter{"
+	for _, k := range keysForCounters {
+		mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
+	}
+	mapStringForCounters += "}"
+	s := strings.Join([]string{`&DeviceCounterConsumption{`,
+		`CounterSet:` + fmt.Sprintf("%v", this.CounterSet) + `,`,
+		`Counters:` + mapStringForCounters + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForFirstAvailable := "[]DeviceSubRequest{"
+	for _, f := range this.FirstAvailable {
+		repeatedStringForFirstAvailable += strings.Replace(strings.Replace(f.String(), "DeviceSubRequest", "DeviceSubRequest", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForFirstAvailable += "}"
+	s := strings.Join([]string{`&DeviceRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Exactly:` + strings.Replace(this.Exactly.String(), "ExactDeviceRequest", "ExactDeviceRequest", 1) + `,`,
+		`FirstAvailable:` + repeatedStringForFirstAvailable + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceRequestAllocationResult) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForTolerations := "[]DeviceToleration{"
+	for _, f := range this.Tolerations {
+		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTolerations += "}"
+	keysForConsumedCapacity := make([]string, 0, len(this.ConsumedCapacity))
+	for k := range this.ConsumedCapacity {
+		keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+	mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{"
+	for _, k := range keysForConsumedCapacity {
+		mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)])
+	}
+	mapStringForConsumedCapacity += "}"
+	s := strings.Join([]string{`&DeviceRequestAllocationResult{`,
+		`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`Pool:` + fmt.Sprintf("%v", this.Pool) + `,`,
+		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+		`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+		`Tolerations:` + repeatedStringForTolerations + `,`,
+		`BindingConditions:` + fmt.Sprintf("%v", this.BindingConditions) + `,`,
+		`BindingFailureConditions:` + fmt.Sprintf("%v", this.BindingFailureConditions) + `,`,
+		`ShareID:` + valueToStringGenerated(this.ShareID) + `,`,
+		`ConsumedCapacity:` + mapStringForConsumedCapacity + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceSelector{`,
+		`CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceSubRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSelectors := "[]DeviceSelector{"
+	for _, f := range this.Selectors {
+		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSelectors += "}"
+	repeatedStringForTolerations := "[]DeviceToleration{"
+	for _, f := range this.Tolerations {
+		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTolerations += "}"
+	s := strings.Join([]string{`&DeviceSubRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
+		`Selectors:` + repeatedStringForSelectors + `,`,
+		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
+		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+		`Tolerations:` + repeatedStringForTolerations + `,`,
+		`Capacity:` + strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeviceToleration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeviceToleration{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
+		`TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExactDeviceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSelectors := "[]DeviceSelector{"
+	for _, f := range this.Selectors {
+		repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSelectors += "}"
+	repeatedStringForTolerations := "[]DeviceToleration{"
+	for _, f := range this.Tolerations {
+		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "DeviceToleration", "DeviceToleration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTolerations += "}"
+	s := strings.Join([]string{`&ExactDeviceRequest{`,
+		`DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`,
+		`Selectors:` + repeatedStringForSelectors + `,`,
+		`AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`,
+		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+		`AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`,
+		`Tolerations:` + repeatedStringForTolerations + `,`,
+		`Capacity:` + strings.Replace(this.Capacity.String(), "CapacityRequirements", "CapacityRequirements", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NetworkDeviceData) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NetworkDeviceData{`,
+		`InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`,
+		`IPs:` + fmt.Sprintf("%v", this.IPs) + `,`,
+		`HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *OpaqueDeviceConfiguration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&OpaqueDeviceConfiguration{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaim) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaim{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimConsumerReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaimConsumerReference{`,
+		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ResourceClaim{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ResourceClaimList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaimSpec{`,
+		`Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{"
+	for _, f := range this.ReservedFor {
+		repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForReservedFor += "}"
+	repeatedStringForDevices := "[]AllocatedDeviceStatus{"
+	for _, f := range this.Devices {
+		repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForDevices += "}"
+	s := strings.Join([]string{`&ResourceClaimStatus{`,
+		`Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`,
+		`ReservedFor:` + repeatedStringForReservedFor + `,`,
+		`Devices:` + repeatedStringForDevices + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimTemplate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaimTemplate{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimTemplateList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ResourceClaimTemplate{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ResourceClaimTemplateList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaimTemplateSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaimTemplateSpec{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourcePool) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourcePool{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+		`ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceSlice) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceSlice{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceSliceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ResourceSlice{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ResourceSliceList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceSliceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForDevices := "[]Device{"
+	for _, f := range this.Devices {
+		repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForDevices += "}"
+	repeatedStringForSharedCounters := "[]CounterSet{"
+	for _, f := range this.SharedCounters {
+		repeatedStringForSharedCounters += strings.Replace(strings.Replace(f.String(), "CounterSet", "CounterSet", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSharedCounters += "}"
+	s := strings.Join([]string{`&ResourceSliceSpec{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`,
+		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+		`NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`,
+		`AllNodes:` + valueToStringGenerated(this.AllNodes) + `,`,
+		`Devices:` + repeatedStringForDevices + `,`,
+		`PerDeviceNodeSelection:` + valueToStringGenerated(this.PerDeviceNodeSelection) + `,`,
+		`SharedCounters:` + repeatedStringForSharedCounters + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Pool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Device = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, v1.Condition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Data == nil {
+				m.Data = &runtime.RawExtension{}
+			}
+			if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NetworkData == nil {
+				m.NetworkData = &NetworkDeviceData{}
+			}
+			if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ShareID = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AllocationResult) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = &v11.NodeSelector{}
+			}
+			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocationTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AllocationTimestamp == nil {
+				m.AllocationTimestamp = &v1.Time{}
+			}
+			if err := m.AllocationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expression = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CapacityRequestPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CapacityRequestPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CapacityRequestPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Default == nil {
+				m.Default = &resource.Quantity{}
+			}
+			if err := m.Default.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ValidValues", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ValidValues = append(m.ValidValues, resource.Quantity{})
+			if err := m.ValidValues[len(m.ValidValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ValidRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ValidRange == nil {
+				m.ValidRange = &CapacityRequestPolicyRange{}
+			}
+			if err := m.ValidRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CapacityRequestPolicyRange) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CapacityRequestPolicyRange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CapacityRequestPolicyRange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Min == nil {
+				m.Min = &resource.Quantity{}
+			}
+			if err := m.Min.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Max == nil {
+				m.Max = &resource.Quantity{}
+			}
+			if err := m.Max.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Step == nil {
+				m.Step = &resource.Quantity{}
+			}
+			if err := m.Step.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CapacityRequirements) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CapacityRequirements: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CapacityRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Requests == nil {
+				m.Requests = make(map[QualifiedName]resource.Quantity)
+			}
+			var mapkey QualifiedName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Requests[QualifiedName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Counter) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Counter: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CounterSet) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CounterSet: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CounterSet: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Counters == nil {
+				m.Counters = make(map[string]Counter)
+			}
+			var mapkey string
+			mapvalue := &Counter{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &Counter{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Counters[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Device) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Device: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Attributes == nil {
+				m.Attributes = make(map[QualifiedName]DeviceAttribute)
+			}
+			var mapkey QualifiedName
+			mapvalue := &DeviceAttribute{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &DeviceAttribute{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Attributes[QualifiedName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = make(map[QualifiedName]DeviceCapacity)
+			}
+			var mapkey QualifiedName
+			mapvalue := &DeviceCapacity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &DeviceCapacity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Capacity[QualifiedName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumesCounters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConsumesCounters = append(m.ConsumesCounters, DeviceCounterConsumption{})
+			if err := m.ConsumesCounters[len(m.ConsumesCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.NodeName = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = &v11.NodeSelector{}
+			}
+			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AllNodes = &b
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Taints = append(m.Taints, DeviceTaint{})
+			if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindsToNode", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.BindsToNode = &b
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowMultipleAllocations", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AllowMultipleAllocations = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Results = append(m.Results, DeviceRequestAllocationResult{})
+			if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Config = append(m.Config, DeviceAllocationConfiguration{})
+			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceAttribute) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IntValue = &v
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.BoolValue = &b
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.StringValue = &s
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.VersionValue = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceCapacity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestPolicy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestPolicy == nil {
+				m.RequestPolicy = &CapacityRequestPolicy{}
+			}
+			if err := m.RequestPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClaim) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Requests = append(m.Requests, DeviceRequest{})
+			if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Constraints = append(m.Constraints, DeviceConstraint{})
+			if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Config = append(m.Config, DeviceClaimConfiguration{})
+			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClass) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClassList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, DeviceClass{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Selectors = append(m.Selectors, DeviceSelector{})
+			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Config = append(m.Config, DeviceClassConfiguration{})
+			if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ExtendedResourceName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Opaque == nil {
+				m.Opaque = &OpaqueDeviceConfiguration{}
+			}
+			if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceConstraint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := FullyQualifiedName(dAtA[iNdEx:postIndex])
+			m.MatchAttribute = &s
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DistinctAttribute", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := FullyQualifiedName(dAtA[iNdEx:postIndex])
+			m.DistinctAttribute = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceCounterConsumption) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceCounterConsumption: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceCounterConsumption: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CounterSet", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CounterSet = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Counters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Counters == nil {
+				m.Counters = make(map[string]Counter)
+			}
+			var mapkey string
+			mapvalue := &Counter{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &Counter{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Counters[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exactly", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Exactly == nil {
+				m.Exactly = &ExactDeviceRequest{}
+			}
+			if err := m.Exactly.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FirstAvailable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FirstAvailable = append(m.FirstAvailable, DeviceSubRequest{})
+			if err := m.FirstAvailable[len(m.FirstAvailable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Request = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Pool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Device = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AdminAccess = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingConditions = append(m.BindingConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BindingFailureConditions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BindingFailureConditions = append(m.BindingFailureConditions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			m.ShareID = &s
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConsumedCapacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConsumedCapacity == nil {
+				m.ConsumedCapacity = make(map[QualifiedName]resource.Quantity)
+			}
+			var mapkey QualifiedName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.ConsumedCapacity[QualifiedName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CEL == nil {
+				m.CEL = &CELDeviceSelector{}
+			}
+			if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceSubRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceSubRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceSubRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Selectors = append(m.Selectors, DeviceSelector{})
+			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = &CapacityRequirements{}
+			}
+			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceTaint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceTaint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceTaint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TimeAdded == nil {
+				m.TimeAdded = &v1.Time{}
+			}
+			if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeviceToleration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeviceToleration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeviceToleration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = DeviceTolerationOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = DeviceTaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TolerationSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExactDeviceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExactDeviceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExactDeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DeviceClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Selectors = append(m.Selectors, DeviceSelector{})
+			if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AdminAccess = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, DeviceToleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = &CapacityRequirements{}
+			}
+			if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InterfaceName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HardwareAddress = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIGroup = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceClaim{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Allocation == nil {
+				m.Allocation = &AllocationResult{}
+			}
+			if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{})
+			if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Devices = append(m.Devices, AllocatedDeviceStatus{})
+			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceClaimTemplate{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourcePool) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+			}
+			m.Generation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Generation |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType)
+			}
+			m.ResourceSliceCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ResourceSliceCount |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSlice) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSliceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceSlice{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.NodeName = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = &v11.NodeSelector{}
+			}
+			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AllNodes = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Devices = append(m.Devices, Device{})
+			if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PerDeviceNodeSelection", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PerDeviceNodeSelection = &b
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SharedCounters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SharedCounters = append(m.SharedCounters, CounterSet{})
+			if err := m.SharedCounters[len(m.SharedCounters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
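The repeated `b := dAtA[iNdEx]; ... |= uint64(b&0x7F) << shift; if b < 0x80 { break }` loops in the generated Unmarshal methods above are hand-inlined protobuf varint decoders: each byte contributes seven payload bits, the high bit marks continuation, and the `shift >= 64` guard rejects overlong encodings. Below is a minimal standalone sketch of that decoding step, using only the standard library; the helper name decodeUvarint is ours and is not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the inlined loop used by the generated Unmarshal
// methods: each byte contributes its low 7 bits, and a cleared high bit
// (b < 0x80) marks the final byte of the varint.
func decodeUvarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF")
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C | (0x02 << 7) = 44 + 256.
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}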
diff --git a/vendor/k8s.io/api/resource/v1/generated.proto b/vendor/k8s.io/api/resource/v1/generated.proto
new file mode 100644
index 000000000..816a430c2
--- /dev/null
+++ b/vendor/k8s.io/api/resource/v1/generated.proto
@@ -0,0 +1,1589 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.resource.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/resource/v1";
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
+message AllocatedDeviceStatus {
+  // Driver specifies the name of the DRA driver whose kubelet
+  // plugin should be invoked to process the allocation once the claim is
+  // needed on a node.
+  //
+  // Must be a DNS subdomain and should end with a DNS domain owned by the
+  // vendor of the driver.
+  //
+  // +required
+  optional string driver = 1;
+
+  // This name together with the driver name and the device name field
+  // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+  //
+  // Must not be longer than 253 characters and may contain one or more
+  // DNS sub-domains separated by slashes.
+  //
+  // +required
+  optional string pool = 2;
+
+  // Device references one device instance via its name in the driver's
+  // resource pool. It must be a DNS label.
+  //
+  // +required
+  optional string device = 3;
+
+  // ShareID uniquely identifies an individual allocation share of the device.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional string shareID = 7;
+
+  // Conditions contains the latest observation of the device's state.
+  // If the device has been configured according to the class and claim
+  // config references, the `Ready` condition should be True.
+  //
+  // Must not contain more than 8 entries.
+  //
+  // +optional
+  // +listType=map
+  // +listMapKey=type
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4;
+
+  // Data contains arbitrary driver-specific data.
+  //
+  // The length of the raw data must be smaller or equal to 10 Ki.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5;
+
+  // NetworkData contains network-related information specific to the device.
+  //
+  // +optional
+  optional NetworkDeviceData networkData = 6;
+}
+
+// AllocationResult contains attributes of an allocated resource.
+message AllocationResult {
+  // Devices is the result of allocating devices.
+  //
+  // +optional
+  optional DeviceAllocationResult devices = 1;
+
+  // NodeSelector defines where the allocated resources are available. If
+  // unset, they are available everywhere.
+  //
+  // +optional
+  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3;
+
+  // AllocationTimestamp stores the time when the resources were allocated.
+  // This field is not guaranteed to be set, in which case that time is unknown.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gate.
+  //
+  // +optional
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time allocationTimestamp = 5;
+}
+
+// CELDeviceSelector contains a CEL expression for selecting a device.
+message CELDeviceSelector {
+  // Expression is a CEL expression which evaluates a single device. It
+  // must evaluate to true when the device under consideration satisfies
+  // the desired criteria, and false when it does not. Any other result
+  // is an error and causes allocation of devices to abort.
+  //
+  // The expression's input is an object named "device", which carries
+  // the following properties:
+  //  - driver (string): the name of the driver which defines this device.
+  //  - attributes (map[string]object): the device's attributes, grouped by prefix
+  //    (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+  //    of the attributes which were prefixed by "dra.example.com".
+  //  - capacity (map[string]object): the device's capacities, grouped by prefix.
+  //  - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device
+  //    (v1.34+ with the DRAConsumableCapacity feature enabled).
+  //
+  // Example: Consider a device with driver="dra.example.com", which exposes
+  // two attributes named "model" and "ext.example.com/family" and which
+  // exposes one capacity named "modules". This input to this expression
+  // would have the following fields:
+  //
+  //     device.driver
+  //     device.attributes["dra.example.com"].model
+  //     device.attributes["ext.example.com"].family
+  //     device.capacity["dra.example.com"].modules
+  //
+  // The device.driver field can be used to check for a specific driver,
+  // either as a high-level precondition (i.e. you only want to consider
+  // devices from this driver) or as part of a multi-clause expression
+  // that is meant to consider devices from different drivers.
+  //
+  // The value type of each attribute is defined by the device
+  // definition, and users who write these expressions must consult the
+  // documentation for their specific drivers. The value type of each
+  // capacity is Quantity.
+  //
+  // If an unknown prefix is used as a lookup in either device.attributes
+  // or device.capacity, an empty map will be returned. Any reference to
+  // an unknown field will cause an evaluation error and allocation to
+  // abort.
+  //
+  // A robust expression should check for the existence of attributes
+  // before referencing them.
+  //
+  // For ease of use, the cel.bind() function is enabled, and can be used
+  // to simplify expressions that access multiple attributes with the
+  // same domain. For example:
+  //
+  //     cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+  //
+  // The length of the expression must be smaller or equal to 10 Ki. The
+  // cost of evaluating it is also limited based on the estimated number
+  // of logical steps.
+  //
+  // +required
+  optional string expression = 1;
+}
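As a concrete illustration of the selector documented above, the sketch below assembles the example expression from these comments in Go. It redeclares a local CELDeviceSelector struct purely so the snippet is self-contained (real code would use the generated Go type from k8s.io/api/resource/v1), and the attribute value "turbo" is a made-up placeholder.

package main

import "fmt"

// CELDeviceSelector is a local stand-in for the message defined above,
// declared here only to keep the sketch self-contained.
type CELDeviceSelector struct {
	Expression string
}

func main() {
	// Combine a driver precondition with an attribute lookup, following the
	// device.driver and device.attributes fields documented above. The
	// attribute value "turbo" is a hypothetical example.
	sel := CELDeviceSelector{
		Expression: `device.driver == "dra.example.com" && device.attributes["dra.example.com"].model == "turbo"`,
	}
	fmt.Println(sel.Expression)
}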
+
+// CapacityRequestPolicy defines how requests consume device capacity.
+//
+// Must not set more than one ValidRequestValues.
+message CapacityRequestPolicy {
+  // Default specifies how much of this capacity is consumed by a request
+  // that does not contain an entry for it in DeviceRequest's Capacity.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity default = 1;
+
+  // ValidValues defines a set of acceptable quantity values in consuming requests.
+  //
+  // Must not contain more than 10 entries.
+  // Must be sorted in ascending order.
+  //
+  // If this field is set,
+  // Default must be defined and it must be included in ValidValues list.
+  //
+  // If the requested amount does not match any valid value but smaller than some valid values,
+  // the scheduler calculates the smallest valid value that is greater than or equal to the request.
+  // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+  //
+  // If the requested amount exceeds all valid values, the request violates the policy,
+  // and this device cannot be allocated.
+  //
+  // +optional
+  // +listType=atomic
+  // +oneOf=ValidRequestValues
+  repeated .k8s.io.apimachinery.pkg.api.resource.Quantity validValues = 3;
+
+  // ValidRange defines an acceptable quantity value range in consuming requests.
+  //
+  // If this field is set,
+  // Default must be defined and it must fall within the defined ValidRange.
+  //
+  // If the requested amount does not fall within the defined range, the request violates the policy,
+  // and this device cannot be allocated.
+  //
+  // If the request doesn't contain this capacity entry, Default value is used.
+  //
+  // +optional
+  // +oneOf=ValidRequestValues
+  optional CapacityRequestPolicyRange validRange = 4;
+}
+
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+//   - If the requested amount is less than Min, it is rounded up to the Min value.
+//   - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+//     it will be rounded up to the next value equal to Min + (n * Step).
+//   - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+//   - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+//     and the device cannot be allocated.
+message CapacityRequestPolicyRange {
+  // Min specifies the minimum capacity allowed for a consumption request.
+  //
+  // Min must be greater than or equal to zero,
+  // and less than or equal to the capacity value.
+  // requestPolicy.default must be more than or equal to the minimum.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity min = 1;
+
+  // Max defines the upper limit for capacity that can be requested.
+  //
+  // Max must be less than or equal to the capacity value.
+  // Min and requestPolicy.default must be less than or equal to the maximum.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity max = 2;
+
+  // Step defines the step size between valid capacity amounts within the range.
+  //
+  // Max (if set) and requestPolicy.default must be a multiple of Step.
+  // Min + Step must be less than or equal to the capacity value.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity step = 3;
+}
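To make the rounding rules above concrete, here is a small arithmetic sketch over plain integers (not the scheduler's actual implementation, which operates on resource.Quantity values): requests below Min round up to Min, requests between Min and Max round up to the next Min + n*Step, and requests above Max are rejected.

package main

import (
	"errors"
	"fmt"
)

// roundUp applies the CapacityRequestPolicyRange rules to plain integers:
// values below minVal round up to minVal, values inside the range round up
// to the next minVal + n*step, and values above maxVal are rejected.
// Illustrative only.
func roundUp(requested, minVal, maxVal, step int64) (int64, error) {
	v := requested
	if v < minVal {
		v = minVal
	} else if step > 0 && (v-minVal)%step != 0 {
		v = minVal + ((v-minVal)/step+1)*step
	}
	if maxVal > 0 && v > maxVal {
		return 0, errors.New("request exceeds the policy's maximum")
	}
	return v, nil
}

func main() {
	// With Min=2, Max=8, Step=2: a request for 3 rounds up to 4,
	// while a request for 9 violates the policy.
	fmt.Println(roundUp(3, 2, 8, 2)) // 4 <nil>
	fmt.Println(roundUp(9, 2, 8, 2)) // 0 request exceeds the policy's maximum
}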
+
+// CapacityRequirements defines the capacity requirements for a specific device request.
+message CapacityRequirements {
+  // Requests represent individual device resource requests for distinct resources,
+  // all of which must be provided by the device.
+  //
+  // This value is used as an additional filtering condition against the available capacity on the device.
+  // This is semantically equivalent to a CEL selector with
+  // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+  // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+  //
+  // When a requestPolicy is defined, the requested amount is adjusted upward
+  // to the nearest valid value based on the policy.
+  // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+  // the device is considered ineligible for allocation.
+  //
+  // For any capacity that is not explicitly requested:
+  // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+  //   (i.e., the whole device is claimed).
+  // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+  //
+  // If the device allows multiple allocation,
+  // the aggregated amount across all requests must not exceed the capacity value.
+  // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+  // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
+  //
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 1;
+}
+
+// Counter describes a quantity associated with a device.
+message Counter {
+  // Value defines how much of a certain device counter is available.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+}
+
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourceSlice.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
+message CounterSet {
+  // Name defines the name of the counter set.
+  // It must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Counters defines the set of counters for this CounterSet
+  // The name of each counter must be unique in that set and must be a DNS label.
+  //
+  // The maximum number of counters in all sets is 32.
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
+message Device {
+  // Name is unique identifier among all devices managed by
+  // the driver in the pool. It must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Attributes defines the set of attributes for this device.
+  // The name of each attribute must be unique in that set.
+  //
+  // The maximum number of attributes and capacities combined is 32.
+  //
+  // +optional
+  map<string, DeviceAttribute> attributes = 2;
+
+  // Capacity defines the set of capacities for this device.
+  // The name of each capacity must be unique in that set.
+  //
+  // The maximum number of attributes and capacities combined is 32.
+  //
+  // +optional
+  map<string, DeviceCapacity> capacity = 3;
+
+  // ConsumesCounters defines a list of references to sharedCounters
+  // and the set of counters that the device will
+  // consume from those counter sets.
+  //
+  // There can only be a single entry per counterSet.
+  //
+  // The total number of device counter consumption entries
+  // must be <= 32. In addition, the total number in the
+  // entire ResourceSlice must be <= 1024 (for example,
+  // 64 devices with 16 counters each).
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRAPartitionableDevices
+  repeated DeviceCounterConsumption consumesCounters = 4;
+
+  // NodeName identifies the node where the device is available.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional string nodeName = 5;
+
+  // NodeSelector defines the nodes where the device is available.
+  //
+  // Must use exactly one term.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 6;
+
+  // AllNodes indicates that all nodes have access to the device.
+  //
+  // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+  // At most one of NodeName, NodeSelector and AllNodes can be set.
+  //
+  // +optional
+  // +oneOf=DeviceNodeSelection
+  // +featureGate=DRAPartitionableDevices
+  optional bool allNodes = 7;
+
+  // If specified, these are the driver-defined taints.
+  //
+  // The maximum number of taints is 4.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceTaint taints = 8;
+
+  // BindsToNode indicates if the usage of an allocation involving this device
+  // has to be limited to exactly the node that was chosen when allocating the claim.
+  // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector
+  // to match the node where the allocation was made.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  optional bool bindsToNode = 9;
+
+  // BindingConditions defines the conditions for proceeding with binding.
+  // All of these conditions must be set in the per-device status
+  // conditions with a value of True to proceed with binding the pod to the node
+  // while scheduling the pod.
+  //
+  // The maximum number of binding conditions is 4.
+  //
+  // The conditions must be a valid condition type string.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingConditions = 10;
+
+  // BindingFailureConditions defines the conditions for binding failure.
+  // They may be set in the per-device status conditions.
+  // If any is set to "True", a binding failure occurred.
+  //
+  // The maximum number of binding failure conditions is 4.
+  //
+  // The conditions must be a valid condition type string.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingFailureConditions = 11;
+
+  // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
+  //
+  // If AllowMultipleAllocations is set to true, the device can be allocated more than once,
+  // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional bool allowMultipleAllocations = 12;
+}
+
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
+message DeviceAllocationConfiguration {
+  // Source records whether the configuration comes from a class and thus
+  // is not something that a normal user would have been able to set
+  // or from a claim.
+  //
+  // +required
+  optional string source = 1;
+
+  // Requests lists the names of requests where the configuration applies.
+  // If empty, its applies to all requests.
+  //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 2;
+
+  optional DeviceConfiguration deviceConfiguration = 3;
+}
+
+// DeviceAllocationResult is the result of allocating devices.
+message DeviceAllocationResult {
+  // Results lists all allocated devices.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceRequestAllocationResult results = 1;
+
+  // This field is a combination of all the claim and class configuration parameters.
+  // Drivers can distinguish between those based on a flag.
+  //
+  // This includes configuration parameters for drivers which have no allocated
+  // devices in the result because it is up to the drivers which configuration
+  // parameters they support. They can silently ignore unknown configuration
+  // parameters.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceAllocationConfiguration config = 2;
+}
+
+// DeviceAttribute must have exactly one field set.
+message DeviceAttribute {
+  // IntValue is a number.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional int64 int = 2;
+
+  // BoolValue is a true/false value.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional bool bool = 3;
+
+  // StringValue is a string. Must not be longer than 64 characters.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional string string = 4;
+
+  // VersionValue is a semantic version according to semver.org spec 2.0.0.
+  // Must not be longer than 64 characters.
+  //
+  // +optional
+  // +oneOf=ValueType
+  optional string version = 5;
+}
+
+// DeviceCapacity describes a quantity associated with a device.
+message DeviceCapacity {
+  // Value defines how much of a certain capacity that device has.
+  //
+  // This field reflects the fixed total capacity and does not change.
+  // The consumed amount is tracked separately by scheduler
+  // and does not affect this value.
+  //
+  // +required
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
+
+  // RequestPolicy defines how this DeviceCapacity must be consumed
+  // when the device is allowed to be shared by multiple allocations.
+  //
+  // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+  //
+  // If unset, capacity requests are unconstrained:
+  // requests can consume any amount of capacity, as long as the total consumed
+  // across all allocations does not exceed the device's defined capacity.
+  // If request is also unset, default is the full capacity value.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional CapacityRequestPolicy requestPolicy = 2;
+}
+
+// DeviceClaim defines how to request devices with a ResourceClaim.
+message DeviceClaim {
+  // Requests represent individual requests for distinct devices which
+  // must all be satisfied. If empty, nothing needs to be allocated.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceRequest requests = 1;
+
+  // These constraints must be satisfied by the set of devices that get
+  // allocated for the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceConstraint constraints = 2;
+
+  // This field holds configuration for multiple potential drivers which
+  // could satisfy requests in this claim. It is ignored while allocating
+  // the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceClaimConfiguration config = 3;
+}
+
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
+message DeviceClaimConfiguration {
+  // Requests lists the names of requests where the configuration applies.
+  // If empty, it applies to all requests.
+  //
+  // References to subrequests must include the name of the main request
+  // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+  // the main request is given, the configuration applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 1;
+
+  optional DeviceConfiguration deviceConfiguration = 2;
+}
+
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+message DeviceClass {
+  // Standard object metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines what can be allocated and how to configure it.
+  //
+  // This is mutable. Consumers have to be prepared for classes changing
+  // at any time, either because they get updated or replaced. Claim
+  // allocations are done once based on whatever was set in classes at
+  // the time of allocation.
+  //
+  // Changing the spec automatically increments the metadata.generation number.
+  optional DeviceClassSpec spec = 2;
+}
+
+// DeviceClassConfiguration is used in DeviceClass.
+message DeviceClassConfiguration {
+  optional DeviceConfiguration deviceConfiguration = 1;
+}
+
+// DeviceClassList is a collection of classes.
+message DeviceClassList {
+  // Standard list metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of resource classes.
+  repeated DeviceClass items = 2;
+}
+
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
+message DeviceClassSpec {
+  // Each selector must be satisfied by a device which is claimed via this class.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 1;
+
+  // Config defines configuration parameters that apply to each device that is claimed via this class.
+  // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+  // configuration applies to exactly one driver.
+  //
+  // They are passed to the driver, but are not considered while allocating the claim.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceClassConfiguration config = 2;
+
+  // ExtendedResourceName is the extended resource name for the devices of this class.
+  // The devices of this class can be used to satisfy a pod's extended resource requests.
+  // It has the same format as the name of a pod's extended resource.
+  // It should be unique among all the device classes in a cluster.
+  // If two device classes have the same name, then the class created later
+  // is picked to satisfy a pod's extended resource requests.
+  // If two classes are created at the same time, then the name of the class
+  // lexicographically sorted first is picked.
+  //
+  // This is an alpha field.
+  // +optional
+  // +featureGate=DRAExtendedResource
+  optional string extendedResourceName = 4;
+}
+
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
+message DeviceConfiguration {
+  // Opaque provides driver-specific configuration parameters.
+  //
+  // +optional
+  // +oneOf=ConfigurationType
+  optional OpaqueDeviceConfiguration opaque = 1;
+}
+
+// DeviceConstraint must have exactly one field set besides Requests.
+message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // References to subrequests must include the name of the main request + // and may include the subrequest using the format
+  // <main request>[/<subrequest>]. If just
+  // the main request is given, the constraint applies to all subrequests.
+  //
+  // +optional
+  // +listType=atomic
+  repeated string requests = 1;
+
+  // MatchAttribute requires that all devices in question have this
+  // attribute and that its type and value are the same across those
+  // devices.
+  //
+  // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+  // then only devices in the same NUMA node will be chosen. A device which
+  // does not have that attribute will not be chosen. All devices should
+  // use a value of the same type for this attribute because that is part of
+  // its specification, but if one device doesn't, then it also will not be
+  // chosen.
+  //
+  // Must include the domain qualifier.
+  //
+  // +optional
+  // +oneOf=ConstraintType
+  optional string matchAttribute = 2;
+
+  // DistinctAttribute requires that all devices in question have this
+  // attribute and that its type and value are unique across those devices.
+  //
+  // This acts as the inverse of MatchAttribute.
+  //
+  // This constraint is used to avoid allocating multiple requests to the same device
+  // by ensuring attribute-level differentiation.
+  //
+  // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+  // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
+  //
+  // +optional
+  // +oneOf=ConstraintType
+  // +featureGate=DRAConsumableCapacity
+  optional string distinctAttribute = 3;
+}
+
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
+message DeviceCounterConsumption {
+  // CounterSet is the name of the set from which the
+  // counters defined will be consumed.
+  //
+  // +required
+  optional string counterSet = 1;
+
+  // Counters defines the counters that will be consumed by the device.
+  //
+  // The maximum number of counters in a device is 32.
+  // In addition, the maximum number of all counters
+  // in all devices is 1024 (for example, 64 devices with
+  // 16 counters each).
+  //
+  // +required
+  map<string, Counter> counters = 2;
+}
+
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices. With FirstAvailable it is also
+// possible to provide a prioritized list of requests.
+message DeviceRequest {
+  // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+  // entry and in a constraint of the claim.
+  //
+  // References using the name in the DeviceRequest will uniquely
+  // identify a request when the Exactly field is set. When the
+  // FirstAvailable field is set, a reference to the name of the
+  // DeviceRequest will match whatever subrequest is chosen by the
+  // scheduler.
+  //
+  // Must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // Exactly specifies the details for a single request that must
+  // be met exactly for the request to be satisfied.
+  //
+  // One of Exactly or FirstAvailable must be set.
+  //
+  // +optional
+  // +oneOf=deviceRequestType
+  optional ExactDeviceRequest exactly = 2;
+
+  // FirstAvailable contains subrequests, of which exactly one will be
+  // selected by the scheduler. It tries to
+  // satisfy them in the order in which they are listed here. So if
+  // there are two entries in the list, the scheduler will only check
+  // the second one if it determines that the first one can not be used.
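+  //
+  // For example (an illustrative sketch; the subrequest and class names
+  // are hypothetical), a request that prefers a large device but accepts
+  // a smaller one could be written as:
+  //
+  //   firstAvailable:
+  //   - name: large
+  //     deviceClassName: large-gpu.example.com
+  //   - name: small
+  //     deviceClassName: small-gpu.example.com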
+  //
+  // DRA does not yet implement scoring, so the scheduler will
+  // select the first set of devices that satisfies all the
+  // requests in the claim. And if the requirements can
+  // be satisfied on more than one node, other scheduling features
+  // will determine which node is chosen. This means that the set of
+  // devices allocated to a claim might not be the optimal set
+  // available to the cluster. Scoring will be implemented later.
+  //
+  // +optional
+  // +oneOf=deviceRequestType
+  // +listType=atomic
+  // +featureGate=DRAPrioritizedList
+  repeated DeviceSubRequest firstAvailable = 3;
+}
+
+// DeviceRequestAllocationResult contains the allocation result for one request.
+message DeviceRequestAllocationResult {
+  // Request is the name of the request in the claim which caused this
+  // device to be allocated. If it references a subrequest in the
+  // firstAvailable list on a DeviceRequest, this field must
+  // include both the name of the main request and the subrequest
+  // using the format
+  // <main request>/<subrequest>.
+  //
+  // Multiple devices may have been allocated per request.
+  //
+  // +required
+  optional string request = 1;
+
+  // Driver specifies the name of the DRA driver whose kubelet
+  // plugin should be invoked to process the allocation once the claim is
+  // needed on a node.
+  //
+  // Must be a DNS subdomain and should end with a DNS domain owned by the
+  // vendor of the driver.
+  //
+  // +required
+  optional string driver = 2;
+
+  // This name together with the driver name and the device name field
+  // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+  //
+  // Must not be longer than 253 characters and may contain one or more
+  // DNS sub-domains separated by slashes.
+  //
+  // +required
+  optional string pool = 3;
+
+  // Device references one device instance via its name in the driver's
+  // resource pool. It must be a DNS label.
+  //
+  // +required
+  optional string device = 4;
+
+  // AdminAccess indicates that this device was allocated for
+  // administrative access. See the corresponding request field
+  // for a definition of mode.
+  //
+  // This is an alpha field and requires enabling the DRAAdminAccess
+  // feature gate. Admin access is disabled if this field is unset or
+  // set to false, otherwise it is enabled.
+  //
+  // +optional
+  // +featureGate=DRAAdminAccess
+  optional bool adminAccess = 5;
+
+  // A copy of all tolerations specified in the request at the time
+  // when the device got allocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 6;
+
+  // BindingConditions contains a copy of the BindingConditions
+  // from the corresponding ResourceSlice at the time of allocation.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingConditions = 7;
+
+  // BindingFailureConditions contains a copy of the BindingFailureConditions
+  // from the corresponding ResourceSlice at the time of allocation.
+  //
+  // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+  // feature gates.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+  repeated string bindingFailureConditions = 8;
+
+  // ShareID uniquely identifies an individual allocation share of the device,
+  // used when the device supports multiple simultaneous allocations.
+  // It serves as an additional map key to differentiate concurrent shares
+  // of the same device.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional string shareID = 9;
+
+  // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+  // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+  // value based on the device's requestPolicy if applicable (i.e., may not be less than the requested amount).
+  //
+  // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+  //
+  // This field is populated only for devices that allow multiple allocations.
+  // All capacity entries are included, even if the consumed amount is zero.
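+  //
+  // For example (an illustrative sketch; the capacity name and the
+  // rounding policy are hypothetical), if a request asks for 3Gi of a
+  // "memory" capacity and the device's requestPolicy only allows
+  // multiples of 4Gi, the recorded consumed amount for that capacity
+  // is 4Gi, even though only 3Gi was requested.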
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> consumedCapacity = 10;
+}
+
+// DeviceSelector must have exactly one field set.
+message DeviceSelector {
+  // CEL contains a CEL expression for selecting a device.
+  //
+  // +optional
+  // +oneOf=SelectorType
+  optional CELDeviceSelector cel = 1;
+}
+
+// DeviceSubRequest describes a request for a device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the
+// AdminAccess field as that one is only supported when requesting a
+// specific device.
+message DeviceSubRequest {
+  // Name can be used to reference this subrequest in the list of constraints
+  // or the list of configurations for the claim. References must use the
+  // format
+  // <main request>/<subrequest>.
+  //
+  // Must be a DNS label.
+  //
+  // +required
+  optional string name = 1;
+
+  // DeviceClassName references a specific DeviceClass, which can define
+  // additional configuration and selectors to be inherited by this
+  // subrequest.
+  //
+  // A class is required. Which classes are available depends on the cluster.
+  //
+  // Administrators may use this to restrict which devices may get
+  // requested by only installing classes with selectors for permitted
+  // devices. If users are free to request anything without restrictions,
+  // then administrators can create an empty DeviceClass for users
+  // to reference.
+  //
+  // +required
+  optional string deviceClassName = 2;
+
+  // Selectors define criteria which must be satisfied by a specific
+  // device in order for that device to be considered for this
+  // subrequest. All selectors must be satisfied for a device to be
+  // considered.
+  //
+  // +optional
+  // +listType=atomic
+  repeated DeviceSelector selectors = 3;
+
+  // AllocationMode and its related fields define how devices are allocated
+  // to satisfy this subrequest. Supported values are:
+  //
+  // - ExactCount: This request is for a specific number of devices.
+  //   This is the default. The exact number is provided in the
+  //   count field.
+  //
+  // - All: This subrequest is for all of the matching devices in a pool.
+  //   Allocation will fail if some devices are already allocated,
+  //   unless adminAccess is requested.
+  //
+  // If AllocationMode is not specified, the default mode is ExactCount. If
+  // the mode is ExactCount and count is not specified, the default count is
+  // one. Any other subrequests must specify this field.
+  //
+  // More modes may get added in the future. Clients must refuse to handle
+  // requests with unknown modes.
+  //
+  // +optional
+  optional string allocationMode = 4;
+
+  // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+  // If AllocationMode is ExactCount and this field is not specified, the default is one.
+  //
+  // +optional
+  // +oneOf=AllocationMode
+  optional int64 count = 5;
+
+  // If specified, the request's tolerations.
+  //
+  // Tolerations for NoSchedule are required to allocate a
+  // device which has a taint with that effect. The same applies
+  // to NoExecute.
+  //
+  // In addition, should any of the allocated devices get tainted
+  // with NoExecute after allocation and that effect is not tolerated,
+  // then all pods consuming the ResourceClaim get deleted to evict
+  // them. The scheduler will not let new pods reserve the claim while
+  // it has these tainted devices. Once all pods are evicted, the
+  // claim will get deallocated.
+  //
+  // The maximum number of tolerations is 16.
+  //
+  // This is an alpha field and requires enabling the DRADeviceTaints
+  // feature gate.
+  //
+  // +optional
+  // +listType=atomic
+  // +featureGate=DRADeviceTaints
+  repeated DeviceToleration tolerations = 6;
+
+  // Capacity defines resource requirements against each capacity.
+  //
+  // If this field is unset and the device supports multiple allocations,
+  // the default value will be applied to each capacity according to requestPolicy.
+  // For a capacity that has no requestPolicy, the default is the full capacity value.
+  //
+  // Applies to each device allocation.
+  // If Count > 1,
+  // the request fails if there aren't enough devices that meet the requirements.
+  // If AllocationMode is set to All,
+  // the request fails if there are devices that otherwise match the request,
+  // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+  //
+  // +optional
+  // +featureGate=DRAConsumableCapacity
+  optional CapacityRequirements capacity = 7;
+}
+
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message DeviceTaint {
+  // The taint key to be applied to a device.
+  // Must be a label name.
+  //
+  // +required
+  optional string key = 1;
+
+  // The taint value corresponding to the taint key.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 2;
+
+  // The effect of the taint on claims that do not tolerate the taint
+  // and through such claims on the pods using them.
+  // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
+  // nodes is not valid here.
+  //
+  // +required
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // Added automatically during create or update if not set.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message DeviceToleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // Must be a label name.
+  //
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+  // tolerate all taints of a particular category.
+  //
+  // +optional
+  // +default="Equal"
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value must be empty, otherwise just a regular string.
+  // Must be a label value.
+  //
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule and NoExecute.
+  //
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // If larger than zero, the time when the pod needs to be evicted is calculated as